● compute-0
    State: running
    Units: 475 loaded (incl. loaded aliases)
     Jobs: 0 queued
   Failed: 0 units
    Since: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
  systemd: 252-64.el9
   CGroup: /
           ├─294028 turbostat --debug sleep 10
           ├─294037 sleep 10
           ├─init.scope
           │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
           ├─machine.slice
           │ ├─libpod-17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681.scope
           │ │ └─container
           │ │   ├─239848 dumb-init --single-child -- kolla_start
           │ │   ├─239853 /usr/bin/python3 /usr/bin/nova-compute
           │ │   ├─246527 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmp70ca1rbh/privsep.sock
           │ │   └─249256 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmp9qdvacqx/privsep.sock
           │ ├─libpod-70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783.scope
           │ │ └─container
           │ │   ├─146043 dumb-init --single-child -- kolla_start
           │ │   └─146046 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
           │ └─libpod-983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b.scope
           │   └─container
           │     ├─155388 dumb-init --single-child -- kolla_start
           │     ├─155391 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │     ├─155696 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │     ├─155891 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpty0fqf9d/privsep.sock
           │     ├─246686 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpx1bw3qnr/privsep.sock
           │     └─246720 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmp037046cp/privsep.sock
           ├─system.slice
           │ ├─NetworkManager.service
           │ │ └─49022 /usr/sbin/NetworkManager --no-daemon
           │ ├─auditd.service
           │ │ ├─704 /sbin/auditd
           │ │ └─706 /usr/sbin/sedispatch
           │ ├─chronyd.service
           │ │ └─58579 /usr/sbin/chronyd -F 2
           │ ├─crond.service
           │ │ ├─ 1010 /usr/sbin/crond -n
           │ │ └─29996 /usr/sbin/anacron -s
           │ ├─dbus-broker.service
           │ │ ├─770 /usr/bin/dbus-broker-launch --scope system --audit
           │ │ └─777 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
           │ ├─edpm_nova_compute.service
           │ │ └─239846 /usr/bin/conmon --api-version 1 -c 17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681 -u 17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681/userdata -p /run/containers/storage/overlay-containers/17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681
           │ ├─edpm_ovn_controller.service
           │ │ └─146041 /usr/bin/conmon --api-version 1 -c 70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783 -u 70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783/userdata -p /run/containers/storage/overlay-containers/70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783
           │ ├─edpm_ovn_metadata_agent.service
           │ │ └─155386 /usr/bin/conmon --api-version 1 -c 983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b -u 983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b/userdata -p /run/containers/storage/overlay-containers/983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b
           │ ├─gssproxy.service
           │ │ └─873 /usr/sbin/gssproxy -D
           │ ├─irqbalance.service
           │ │ └─795 /usr/sbin/irqbalance
           │ ├─iscsid.service
           │ │ └─224391 /usr/sbin/iscsid -f
           │ ├─multipathd.service
           │ │ └─224550 /sbin/multipathd -d -s
           │ ├─ovs-vswitchd.service
           │ │ └─47317 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
           │ ├─ovsdb-server.service
           │ │ └─47236 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
           │ ├─polkit.service
           │ │ └─43458 /usr/lib/polkit-1/polkitd --no-debug
           │ ├─rpcbind.service
           │ │ └─702 /usr/bin/rpcbind -w -f
           │ ├─rsyslog.service
           │ │ └─1006 /usr/sbin/rsyslogd -n
           │ ├─sshd.service
           │ │ └─182591 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
           │ ├─system-ceph\x2deb48d0ef\x2d3496\x2d563c\x2db73d\x2d661fb962013e.slice
           │ │ ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@crash.compute-0.service
           │ │ │ ├─libpod-payload-43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b
           │ │ │ │ ├─80152 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
           │ │ │ │ └─80154 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
           │ │ │ └─runtime
           │ │ │   └─80150 /usr/bin/conmon --api-version 1 -c 43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b -u 43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b/userdata -p /run/containers/storage/overlay-containers/43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b
           │ │ ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mds.cephfs.compute-0.vvdoei.service
           │ │ │ ├─libpod-payload-30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56
           │ │ │ │ ├─95612 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.vvdoei -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─95614 /usr/bin/ceph-mds -n mds.cephfs.compute-0.vvdoei -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─95610 /usr/bin/conmon --api-version 1 -c 30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56 -u 30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56/userdata -p /run/containers/storage/overlay-containers/30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-mds-cephfs-compute-0-vvdoei --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mds.cephfs.compute-0.vvdoei.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56
           │ │ ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mgr.compute-0.hccdnu.service
           │ │ │ ├─libpod-payload-f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c
           │ │ │ │ ├─75491 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.hccdnu -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─75493 /usr/bin/ceph-mgr -n mgr.compute-0.hccdnu -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─75489 /usr/bin/conmon --api-version 1 -c f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c -u f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c/userdata -p /run/containers/storage/overlay-containers/f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-mgr-compute-0-hccdnu --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mgr.compute-0.hccdnu.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c
           │ │ ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mon.compute-0.service
           │ │ │ ├─libpod-payload-49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26
           │ │ │ │ ├─75195 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ │ └─75197 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─75193 /usr/bin/conmon --api-version 1 -c 49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26 -u 49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26/userdata -p /run/containers/storage/overlay-containers/49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26
           │ │ ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.0.service
           │ │ │ ├─libpod-payload-5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f
           │ │ │ │ ├─85694 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
           │ │ │ │ └─85696 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
           │ │ │ └─runtime
           │ │ │   └─85692 /usr/bin/conmon --api-version 1 -c 5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f -u 5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f/userdata -p /run/containers/storage/overlay-containers/5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f
           │ │ ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.1.service
           │ │ │ ├─libpod-payload-849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42
           │ │ │ │ ├─86735 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
           │ │ │ │ └─86737 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
           │ │ │ └─runtime
           │ │ │   └─86733 /usr/bin/conmon --api-version 1 -c 849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42 -u 849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42/userdata -p /run/containers/storage/overlay-containers/849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42
           │ │ ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.2.service
           │ │ │ ├─libpod-payload-49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071
           │ │ │ │ ├─87790 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
           │ │ │ │ └─87792 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
           │ │ │ └─runtime
           │ │ │   └─87788 /usr/bin/conmon --api-version 1 -c 49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071 -u 49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071/userdata -p /run/containers/storage/overlay-containers/49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071
           │ │ └─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@rgw.rgw.compute-0.molmny.service
           │ │   ├─libpod-payload-31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e
           │ │   │ ├─95127 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.molmny -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
           │ │   │ └─95129 /usr/bin/radosgw -n client.rgw.rgw.compute-0.molmny -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
           │ │   └─runtime
           │ │     └─95125 /usr/bin/conmon --api-version 1 -c 31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e -u 31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e/userdata -p /run/containers/storage/overlay-containers/31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-rgw-rgw-compute-0-molmny --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@rgw.rgw.compute-0.molmny.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e
           │ ├─system-getty.slice
           │ │ └─getty@tty1.service
           │ │   └─1013 /sbin/agetty -o "-p -- \\u" --noclear - linux
           │ ├─system-serial\x2dgetty.slice
           │ │ └─serial-getty@ttyS0.service
           │ │   └─1014 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
           │ ├─systemd-hostnamed.service
           │ │ └─290803 /usr/lib/systemd/systemd-hostnamed
           │ ├─systemd-journald.service
           │ │ └─679 /usr/lib/systemd/systemd-journald
           │ ├─systemd-logind.service
           │ │ └─799 /usr/lib/systemd/systemd-logind
           │ ├─systemd-machined.service
           │ │ └─208080 /usr/lib/systemd/systemd-machined
           │ ├─systemd-udevd.service
           │ │ └─udev
           │ │   └─732 /usr/lib/systemd/systemd-udevd
           │ ├─tuned.service
           │ │ └─106772 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
           │ ├─virtlogd.service
           │ │ └─207424 /usr/sbin/virtlogd
           │ ├─virtnodedevd.service
           │ │ └─240340 /usr/sbin/virtnodedevd --timeout 120
           │ ├─virtqemud.service
           │ │ └─239321 /usr/sbin/virtqemud --timeout 120
           │ └─virtsecretd.service
           │   └─246591 /usr/sbin/virtsecretd --timeout 120
           └─user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4520 /usr/bin/python3
             │ ├─session-54.scope
             │ │ ├─287794 "sshd-session: zuul [priv]"
             │ │ ├─287797 "sshd-session: zuul@notty"
             │ │ ├─287798 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report Unit boot.automount could not be found.
--batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─287822 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─294026 timeout 15s turbostat --debug sleep 10
             │ │ ├─294368 timeout 300s semanage interface -l
             │ │ ├─294369 /usr/bin/python3 -EsI /usr/sbin/semanage interface -l
             │ │ ├─294374 timeout 300s systemctl status --all
             │ │ ├─294375 systemctl status --all
             │ │ ├─294376 timeout 300s ceph mds stat --format json-pretty
             │ │ └─294377 /usr/bin/python3 -s /usr/bin/ceph mds stat --format json-pretty
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─13900 /usr/bin/dbus-broker-launch --scope user
             │   │   └─13910 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4307 /usr/lib/systemd/systemd --user
             │   │ └─4309 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-6e894c72.scope
             │       └─13874 catatonit -P
             └─user-42477.slice
               ├─session-20.scope
               │ ├─76562 "sshd-session: ceph-admin [priv]"
               │ └─76584 "sshd-session: ceph-admin"
               ├─session-22.scope
               │ ├─76579 "sshd-session: ceph-admin [priv]"
               │ └─76585 "sshd-session: ceph-admin@notty"
               ├─session-23.scope
               │ ├─76611 "sshd-session: ceph-admin [priv]"
               │ └─76614 "sshd-session: ceph-admin@notty"
               ├─session-24.scope
               │ ├─76640 "sshd-session: ceph-admin [priv]"
               │ └─76643 "sshd-session: ceph-admin@notty"
               ├─session-25.scope
               │ ├─76669 "sshd-session: ceph-admin [priv]"
               │ └─76672 "sshd-session: ceph-admin@notty"
               ├─session-26.scope
               │ ├─76698 "sshd-session: ceph-admin [priv]"
               │ └─76701 "sshd-session: ceph-admin@notty"
               ├─session-27.scope
               │ ├─76727 "sshd-session: ceph-admin [priv]"
               │ └─76730 "sshd-session: ceph-admin@notty"
               ├─session-28.scope
               │ ├─76756 "sshd-session: ceph-admin [priv]"
               │ └─76759 "sshd-session: ceph-admin@notty"
               ├─session-29.scope
               │ ├─76785 "sshd-session: ceph-admin [priv]"
               │ └─76788 "sshd-session: ceph-admin@notty"
               ├─session-30.scope
               │ ├─76814 "sshd-session: ceph-admin [priv]"
               │ └─76817 "sshd-session: ceph-admin@notty"
               ├─session-31.scope
               │ ├─76841 "sshd-session: ceph-admin [priv]"
               │ └─76844 "sshd-session: ceph-admin@notty"
               ├─session-32.scope
               │ ├─76870 "sshd-session: ceph-admin [priv]"
               │ └─76873 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─76566 /usr/lib/systemd/systemd --user
                   └─76568 "(sd-pam)"

● proc-sys-fs-binfmt_misc.automount - Arbitrary Executable File Formats File System Automount Point
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.automount; static)
     Active: active (running) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
   Triggers: ● proc-sys-fs-binfmt_misc.mount
      Where: /proc/sys/fs/binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Feb 02 17:20:50 compute-0 systemd[1]: proc-sys-fs-binfmt_misc.automount: Got automount request for /proc/sys/fs/binfmt_misc, triggered by 77613 (sysctl)

● dev-cdrom.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ceph_vg0-ceph_lv0.device - /dev/ceph_vg0/ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:33 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:33 UTC; 53min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-ceph_vg1-ceph_lv1.device - /dev/ceph_vg1/ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:37 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:37 UTC; 53min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-ceph_vg2-ceph_lv2.device - /dev/ceph_vg2/ceph_lv2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:41 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:41 UTC; 53min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2ddiskseq-1.device - /dev/disk/by-diskseq/1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2ddiskseq-11.device - /dev/disk/by-diskseq/11
    Follows: unit currently follows state of sys-devices-virtual-block-loop5.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:41 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:41 UTC; 53min ago
     Device: /sys/devices/virtual/block/loop5

● dev-disk-by\x2ddiskseq-12.device - /dev/disk/by-diskseq/12
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:41 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:41 UTC; 53min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2ddiskseq-3.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2ddiskseq-5.device - /dev/disk/by-diskseq/5
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:32 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:32 UTC; 53min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2ddiskseq-6.device - /dev/disk/by-diskseq/6
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:33 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:33 UTC; 53min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2ddiskseq-8.device - /dev/disk/by-diskseq/8
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:37 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:37 UTC; 53min ago
     Device: /sys/devices/virtual/block/loop4

● dev-disk-by\x2ddiskseq-9.device - /dev/disk/by-diskseq/9
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:37 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:37 UTC; 53min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-ata\x2dQEMU_DVD\x2dROM_QM00001.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg0\x2dceph_lv0.device - /dev/disk/by-id/dm-name-ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:33 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:33 UTC; 53min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg1\x2dceph_lv1.device - /dev/disk/by-id/dm-name-ceph_vg1-ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:37 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:37 UTC; 53min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg2\x2dceph_lv2.device - /dev/disk/by-id/dm-name-ceph_vg2-ceph_lv2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:41 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:41 UTC; 53min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2ddc02eosFa8h5eCqUPgM8A8KXoUdpicDRCqaHsNLsykVn0NZzrwWpbmGaaoHPVA2s.device - /dev/disk/by-id/dm-uuid-LVM-dc02eosFa8h5eCqUPgM8A8KXoUdpicDRCqaHsNLsykVn0NZzrwWpbmGaaoHPVA2s
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:37 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:37 UTC; 53min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2dmjvXDCRncbCTYSocG1S0zijAERGksRlr5McfYlzwU7cLGOUofu9c3SlR35beE1HP.device - /dev/disk/by-id/dm-uuid-LVM-mjvXDCRncbCTYSocG1S0zijAERGksRlr5McfYlzwU7cLGOUofu9c3SlR35beE1HP
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:41 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:41 UTC; 53min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2dPO9klGbXguXpdNqRPbIhiJp0d0wctpgA0AKnlFkT48ijtsLrH2qOCQ1SQVtK47Q1.device - /dev/disk/by-id/dm-uuid-LVM-PO9klGbXguXpdNqRPbIhiJp0d0wctpgA0AKnlFkT48ijtsLrH2qOCQ1SQVtK47Q1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:33 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:33 UTC; 53min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2d0bmreA\x2dARzX\x2dnYC0\x2d0MLv\x2d8DFq\x2ddffc\x2dSWpvnP.device - /dev/disk/by-id/lvm-pv-uuid-0bmreA-ARzX-nYC0-0MLv-8DFq-dffc-SWpvnP
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:33 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:33 UTC; 53min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dGgxepI\x2dktvG\x2ddoRu\x2dTC2g\x2dHOeK\x2deQej\x2dImIK1e.device - /dev/disk/by-id/lvm-pv-uuid-GgxepI-ktvG-doRu-TC2g-HOeK-eQej-ImIK1e
    Follows: unit currently follows state of sys-devices-virtual-block-loop5.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:41 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:41 UTC; 53min ago
     Device: /sys/devices/virtual/block/loop5

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dObTS0X\x2dES7N\x2dYQQk\x2dNd6i\x2de2t1\x2dyUaC\x2duMwweh.device - /dev/disk/by-id/lvm-pv-uuid-ObTS0X-ES7N-YQQk-Nd6i-e2t1-yUaC-uMwweh
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:37 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:37 UTC; 53min ago
     Device: /sys/devices/virtual/block/loop4

● dev-disk-by\x2dlabel-config\x2d2.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpartuuid-714daac1\x2d01.device - /dev/disk/by-partuuid/714daac1-01
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0.device - /dev/disk/by-path/pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0.device - /dev/disk/by-path/virtio-pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/virtio-pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2duuid-2026\x2d02\x2d02\x2d16\x2d45\x2d45\x2d00.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2duuid-822f14ea\x2d6e7e\x2d41df\x2db0d8\x2dfbe282d9ded8.device - /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

Feb 02 16:46:00 localhost systemd[1]: Found device /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8.

● dev-dm\x2d0.device - /dev/dm-0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:33 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:33 UTC; 53min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-dm\x2d1.device - /dev/dm-1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:37 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:37 UTC; 53min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-dm\x2d2.device - /dev/dm-2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:41 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:41 UTC; 53min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-loop3.device - /dev/loop3
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:32 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:32 UTC; 53min ago
     Device: /sys/devices/virtual/block/loop3

● dev-loop4.device - /dev/loop4
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:37 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:37 UTC; 53min ago
     Device: /sys/devices/virtual/block/loop4

● dev-loop5.device - /dev/loop5
    Follows: unit currently follows state of sys-devices-virtual-block-loop5.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:41 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:41 UTC; 53min ago
     Device: /sys/devices/virtual/block/loop5

● dev-mapper-ceph_vg0\x2dceph_lv0.device - /dev/mapper/ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:33 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:33 UTC; 53min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-mapper-ceph_vg1\x2dceph_lv1.device - /dev/mapper/ceph_vg1-ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:37 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:37 UTC; 53min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-mapper-ceph_vg2\x2dceph_lv2.device - /dev/mapper/ceph_vg2-ceph_lv2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:41 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:41 UTC; 53min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-rfkill.device - /dev/rfkill
    Follows: unit currently follows state of sys-devices-virtual-misc-rfkill.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:05 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:05 UTC; 1h 27min ago
     Device: /sys/devices/virtual/misc/rfkill

● dev-sr0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ttyS0.device - /dev/ttyS0
    Follows: unit currently follows state of sys-devices-pnp0-00:00-tty-ttyS0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

Feb 02 16:46:03 localhost systemd[1]: Condition check resulted in /dev/ttyS0 being skipped.

● dev-ttyS1.device - /dev/ttyS1
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● dev-ttyS2.device - /dev/ttyS2
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS2.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● dev-ttyS3.device - /dev/ttyS3
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS3.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● dev-vda.device - /dev/vda
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-vda1.device - /dev/vda1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device - QEMU_DVD-ROM config-2
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● sys-devices-pci0000:00-0000:00:03.0-virtio1-net-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● sys-devices-pci0000:00-0000:00:07.0-virtio5-net-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:48:01 UTC; 1h 25min ago
      Until: Mon 2026-02-02 16:48:01 UTC; 1h 25min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-devices-platform-serial8250-tty-ttyS1.device - /sys/devices/platform/serial8250/tty/ttyS1
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● sys-devices-platform-serial8250-tty-ttyS2.device - /sys/devices/platform/serial8250/tty/ttyS2
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● sys-devices-platform-serial8250-tty-ttyS3.device - /sys/devices/platform/serial8250/tty/ttyS3
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● sys-devices-pnp0-00:00-tty-ttyS0.device - /sys/devices/pnp0/00:00/tty/ttyS0
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

● sys-devices-virtual-block-dm\x2d0.device - /sys/devices/virtual/block/dm-0
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:33 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:33 UTC; 53min ago
     Device: /sys/devices/virtual/block/dm-0

● sys-devices-virtual-block-dm\x2d1.device - /sys/devices/virtual/block/dm-1
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:37 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:37 UTC; 53min ago
     Device: /sys/devices/virtual/block/dm-1

● sys-devices-virtual-block-dm\x2d2.device - /sys/devices/virtual/block/dm-2
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:41 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:41 UTC; 53min ago
     Device: /sys/devices/virtual/block/dm-2

● sys-devices-virtual-block-loop3.device - /sys/devices/virtual/block/loop3
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:32 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:32 UTC; 53min ago
     Device: /sys/devices/virtual/block/loop3

● sys-devices-virtual-block-loop4.device - /sys/devices/virtual/block/loop4
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:37 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:37 UTC; 53min ago
     Device: /sys/devices/virtual/block/loop4

● sys-devices-virtual-block-loop5.device - /sys/devices/virtual/block/loop5
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:19:41 UTC; 53min ago
      Until: Mon 2026-02-02 17:19:41 UTC; 53min ago
     Device: /sys/devices/virtual/block/loop5

● sys-devices-virtual-misc-rfkill.device - /sys/devices/virtual/misc/rfkill
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:05 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:05 UTC; 1h 27min ago
     Device: /sys/devices/virtual/misc/rfkill

● sys-devices-virtual-net-br\x2dex.device - /sys/devices/virtual/net/br-ex
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:16:52 UTC; 56min ago
      Until: Mon 2026-02-02 17:16:52 UTC; 56min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-devices-virtual-net-br\x2dint.device - /sys/devices/virtual/net/br-int
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:29:47 UTC; 43min ago
      Until: Mon 2026-02-02 17:29:47 UTC; 43min ago
     Device: /sys/devices/virtual/net/br-int

● sys-devices-virtual-net-genev_sys_6081.device - /sys/devices/virtual/net/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:29:47 UTC; 43min ago
      Until: Mon 2026-02-02 17:29:47 UTC; 43min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-devices-virtual-net-ovs\x2dsystem.device - /sys/devices/virtual/net/ovs-system
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:16:52 UTC; 56min ago
      Until: Mon 2026-02-02 17:16:52 UTC; 56min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-devices-virtual-net-vlan20.device - /sys/devices/virtual/net/vlan20
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:16:52 UTC; 56min ago
      Until: Mon 2026-02-02 17:16:52 UTC; 56min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-devices-virtual-net-vlan21.device - /sys/devices/virtual/net/vlan21
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:16:52 UTC; 56min ago
      Until: Mon 2026-02-02 17:16:52 UTC; 56min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-devices-virtual-net-vlan22.device - /sys/devices/virtual/net/vlan22
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:16:52 UTC; 56min ago
      Until: Mon 2026-02-02 17:16:52 UTC; 56min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-devices-virtual-net-vlan23.device - /sys/devices/virtual/net/vlan23
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:16:52 UTC; 56min ago
      Until: Mon 2026-02-02 17:16:52 UTC; 56min ago
     Device: /sys/devices/virtual/net/vlan23

● sys-module-configfs.device - /sys/module/configfs
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/module/configfs

● sys-module-fuse.device - /sys/module/fuse
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/module/fuse

● sys-subsystem-net-devices-br\x2dex.device - /sys/subsystem/net/devices/br-ex
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:16:52 UTC; 56min ago
      Until: Mon 2026-02-02 17:16:52 UTC; 56min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-subsystem-net-devices-br\x2dint.device - /sys/subsystem/net/devices/br-int
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:29:47 UTC; 43min ago
      Until: Mon 2026-02-02 17:29:47 UTC; 43min ago
     Device: /sys/devices/virtual/net/br-int

● sys-subsystem-net-devices-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-subsystem-net-devices-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 16:48:01 UTC; 1h 25min ago
      Until: Mon 2026-02-02 16:48:01 UTC; 1h 25min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-subsystem-net-devices-genev_sys_6081.device - /sys/subsystem/net/devices/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:29:47 UTC; 43min ago
      Until: Mon 2026-02-02 17:29:47 UTC; 43min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-subsystem-net-devices-ovs\x2dsystem.device - /sys/subsystem/net/devices/ovs-system
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:16:52 UTC; 56min ago
      Until: Mon 2026-02-02 17:16:52 UTC; 56min ago
     Device: /sys/devices/virtual/net/ovs-system

Unit boot.mount could not be found.
Unit home.mount could not be found.
● sys-subsystem-net-devices-vlan20.device - /sys/subsystem/net/devices/vlan20
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:16:52 UTC; 56min ago
      Until: Mon 2026-02-02 17:16:52 UTC; 56min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-subsystem-net-devices-vlan21.device - /sys/subsystem/net/devices/vlan21
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:16:52 UTC; 56min ago
      Until: Mon 2026-02-02 17:16:52 UTC; 56min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-subsystem-net-devices-vlan22.device - /sys/subsystem/net/devices/vlan22
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:16:52 UTC; 56min ago
      Until: Mon 2026-02-02 17:16:52 UTC; 56min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-subsystem-net-devices-vlan23.device - /sys/subsystem/net/devices/vlan23
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 17:16:52 UTC; 56min ago
      Until: Mon 2026-02-02 17:16:52 UTC; 56min ago
     Device: /sys/devices/virtual/net/vlan23

● -.mount - Root Mount
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted)
      Where: /
       What: /dev/vda1
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages.mount - Huge Pages File System
     Loaded: loaded (/usr/lib/systemd/system/dev-hugepages.mount; static)
     Active: active (mounted) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Where: /dev/hugepages
       What: hugetlbfs
       Docs: https://docs.kernel.org/admin-guide/mm/hugetlbpage.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 52.0K (peak: 556.0K)
        CPU: 7ms
     CGroup: /dev-hugepages.mount

Feb 02 16:46:02 localhost systemd[1]: Mounted Huge Pages File System.

● dev-hugepages1G.mount - /dev/hugepages1G
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Mon 2026-02-02 17:18:41 UTC; 54min ago
      Until: Mon 2026-02-02 17:18:41 UTC; 54min ago
      Where: /dev/hugepages1G
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages2M.mount - /dev/hugepages2M
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Mon 2026-02-02 17:18:41 UTC; 54min ago
      Until: Mon 2026-02-02 17:18:41 UTC; 54min ago
      Where: /dev/hugepages2M
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-mqueue.mount - POSIX Message Queue File System
     Loaded: loaded (/usr/lib/systemd/system/dev-mqueue.mount; static)
     Active: active (mounted) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Where: /dev/mqueue
       What: mqueue
       Docs: man:mq_overview(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 4ms
     CGroup: /dev-mqueue.mount

Feb 02 16:46:02 localhost systemd[1]: Mounted POSIX Message Queue File System.

○ proc-fs-nfsd.mount - NFSD configuration filesystem
     Loaded: loaded (/usr/lib/systemd/system/proc-fs-nfsd.mount; static)
     Active: inactive (dead)
      Where: /proc/fs/nfsd
       What: nfsd

● proc-sys-fs-binfmt_misc.mount - Arbitrary Executable File Formats File System
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.mount; disabled; preset: disabled)
     Active: active (mounted) since Mon 2026-02-02 17:20:50 UTC; 52min ago
      Until: Mon 2026-02-02 17:20:50 UTC; 52min ago
TriggeredBy: ● proc-sys-fs-binfmt_misc.automount
      Where: /proc/sys/fs/binfmt_misc
       What: binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 8.0K (peak: 540.0K)
        CPU: 5ms
     CGroup: /proc-sys-fs-binfmt_misc.mount

Feb 02 17:20:50 compute-0 systemd[1]: Mounting Arbitrary Executable File Formats File System...
Feb 02 17:20:50 compute-0 systemd[1]: Mounted Arbitrary Executable File Formats File System.

● run-credentials-systemd\x2dsysctl.service.mount - /run/credentials/systemd-sysctl.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 17:15:52 UTC; 57min ago
      Until: Mon 2026-02-02 17:15:52 UTC; 57min ago
      Where: /run/credentials/systemd-sysctl.service
       What: none

● run-credentials-systemd\x2dsysusers.service.mount - /run/credentials/systemd-sysusers.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Where: /run/credentials/systemd-sysusers.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup.service.mount - /run/credentials/systemd-tmpfiles-setup.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Where: /run/credentials/systemd-tmpfiles-setup.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup\x2ddev.service.mount - /run/credentials/systemd-tmpfiles-setup-dev.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Where: /run/credentials/systemd-tmpfiles-setup-dev.service
       What: none

● run-netns.mount - /run/netns
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 17:18:15 UTC; 55min ago
      Until: Mon 2026-02-02 17:18:15 UTC; 55min ago
      Where: /run/netns
       What: tmpfs

● run-user-1000.mount - /run/user/1000
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 16:46:28 UTC; 1h 26min ago
      Until: Mon 2026-02-02 16:46:28 UTC; 1h 26min ago
      Where: /run/user/1000
       What: tmpfs

● run-user-42477.mount - /run/user/42477
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 17:20:42 UTC; 52min ago
      Until: Mon 2026-02-02 17:20:42 UTC; 52min ago
      Where: /run/user/42477
       What: tmpfs

● sys-fs-fuse-connections.mount - FUSE Control File System
     Loaded: loaded (/usr/lib/systemd/system/sys-fs-fuse-connections.mount; static)
     Active: active (mounted) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Where: /sys/fs/fuse/connections
       What: fusectl
       Docs: https://docs.kernel.org/filesystems/fuse.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 556.0K)
        CPU: 3ms
     CGroup: /sys-fs-fuse-connections.mount

Feb 02 16:46:02 localhost systemd[1]: Mounting FUSE Control File System...
Feb 02 16:46:02 localhost systemd[1]: Mounted FUSE Control File System.

● sys-kernel-config.mount - Kernel Configuration File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Where: /sys/kernel/config
       What: configfs
       Docs: https://docs.kernel.org/filesystems/configfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● sys-kernel-debug-tracing.mount - /sys/kernel/debug/tracing
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 18:03:08 UTC; 10min ago
      Until: Mon 2026-02-02 18:03:08 UTC; 10min ago
      Where: /sys/kernel/debug/tracing
       What: tracefs

Unit sysroot.mount could not be found.
● sys-kernel-debug.mount - Kernel Debug File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-debug.mount; static)
     Active: active (mounted) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Where: /sys/kernel/debug
       What: debugfs
       Docs: https://docs.kernel.org/filesystems/debugfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 3ms
     CGroup: /sys-kernel-debug.mount

Feb 02 16:46:02 localhost systemd[1]: Mounted Kernel Debug File System.

● sys-kernel-tracing.mount - Kernel Trace File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-tracing.mount; static)
     Active: active (mounted) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Where: /sys/kernel/tracing
       What: tracefs
       Docs: https://docs.kernel.org/trace/ftrace.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 4ms
     CGroup: /sys-kernel-tracing.mount

Feb 02 16:46:02 localhost systemd[1]: Mounted Kernel Trace File System.

○ tmp.mount - Temporary Directory /tmp
     Loaded: loaded (/usr/lib/systemd/system/tmp.mount; disabled; preset: disabled)
     Active: inactive (dead)
      Where: /tmp
       What: tmpfs
       Docs: https://systemd.io/TEMPORARY_DIRECTORIES
             man:file-hierarchy(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● var-lib-containers-storage-overlay-12b201a78fc33ffd5ecf252499217645a7a52199eb93133891737b3f11b2b3c1-merged.mount - /var/lib/containers/storage/overlay/12b201a78fc33ffd5ecf252499217645a7a52199eb93133891737b3f11b2b3c1/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 17:37:21 UTC; 36min ago
      Until: Mon 2026-02-02 17:37:21 UTC; 36min ago
      Where: /var/lib/containers/storage/overlay/12b201a78fc33ffd5ecf252499217645a7a52199eb93133891737b3f11b2b3c1/merged
       What: overlay

● var-lib-containers-storage-overlay-2987def45149fc5249cbc723bda35e766362949640d794514681c630a9cc739a-merged.mount - /var/lib/containers/storage/overlay/2987def45149fc5249cbc723bda35e766362949640d794514681c630a9cc739a/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 17:21:22 UTC; 52min ago
      Until: Mon 2026-02-02 17:21:22 UTC; 52min ago
      Where: /var/lib/containers/storage/overlay/2987def45149fc5249cbc723bda35e766362949640d794514681c630a9cc739a/merged
       What: overlay

● var-lib-containers-storage-overlay-342c35df542ed96dbd206211c1cb8b1fbfcf5f53e46a9d2541015e043cad3895-merged.mount - /var/lib/containers/storage/overlay/342c35df542ed96dbd206211c1cb8b1fbfcf5f53e46a9d2541015e043cad3895/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 17:30:44 UTC; 42min ago
      Until: Mon 2026-02-02 17:30:44 UTC; 42min ago
      Where: /var/lib/containers/storage/overlay/342c35df542ed96dbd206211c1cb8b1fbfcf5f53e46a9d2541015e043cad3895/merged
       What: overlay

● var-lib-containers-storage-overlay-3a407352bffbd8c32c0c5fae65a069ae35ec5a4538ab19f169eac7fb4d31648b-merged.mount - /var/lib/containers/storage/overlay/3a407352bffbd8c32c0c5fae65a069ae35ec5a4538ab19f169eac7fb4d31648b/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 17:22:03 UTC; 51min ago
      Until: Mon 2026-02-02 17:22:03 UTC; 51min ago
      Where: /var/lib/containers/storage/overlay/3a407352bffbd8c32c0c5fae65a069ae35ec5a4538ab19f169eac7fb4d31648b/merged
       What: overlay

● var-lib-containers-storage-overlay-4bf6daa03ef21358e51594cc97c548d0cd288d82b5051d7c14b4f297bf74e97f-merged.mount - /var/lib/containers/storage/overlay/4bf6daa03ef21358e51594cc97c548d0cd288d82b5051d7c14b4f297bf74e97f/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 17:20:55 UTC; 52min ago
      Until: Mon 2026-02-02 17:20:55 UTC; 52min ago
      Where: /var/lib/containers/storage/overlay/4bf6daa03ef21358e51594cc97c548d0cd288d82b5051d7c14b4f297bf74e97f/merged
       What: overlay

● var-lib-containers-storage-overlay-5898685d04b362b3615be44f4ef9ba6201398fd991d72fa87b426e4a238337a9-merged.mount - /var/lib/containers/storage/overlay/5898685d04b362b3615be44f4ef9ba6201398fd991d72fa87b426e4a238337a9/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 17:21:19 UTC; 52min ago
      Until: Mon 2026-02-02 17:21:19 UTC; 52min ago
      Where: /var/lib/containers/storage/overlay/5898685d04b362b3615be44f4ef9ba6201398fd991d72fa87b426e4a238337a9/merged
       What: overlay

● var-lib-containers-storage-overlay-712b4fe75eacb05bd462801e6213002997eeb8625e3509a97a7e905c4d528a8b-merged.mount - /var/lib/containers/storage/overlay/712b4fe75eacb05bd462801e6213002997eeb8625e3509a97a7e905c4d528a8b/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 17:20:21 UTC; 53min ago
      Until: Mon 2026-02-02 17:20:21 UTC; 53min ago
      Where: /var/lib/containers/storage/overlay/712b4fe75eacb05bd462801e6213002997eeb8625e3509a97a7e905c4d528a8b/merged
       What: overlay

● var-lib-containers-storage-overlay-84ee672d236bb08fe51c40da1b138fce5f3b3744b6b9cf85b010fa26ae16304b-merged.mount - /var/lib/containers/storage/overlay/84ee672d236bb08fe51c40da1b138fce5f3b3744b6b9cf85b010fa26ae16304b/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 17:21:15 UTC; 52min ago
      Until: Mon 2026-02-02 17:21:15 UTC; 52min ago
      Where: /var/lib/containers/storage/overlay/84ee672d236bb08fe51c40da1b138fce5f3b3744b6b9cf85b010fa26ae16304b/merged
       What: overlay

● var-lib-containers-storage-overlay-acc235568d5ff1570e05cad10c8716d6f325a02e28f068a1c35e5f616b2837a2-merged.mount - /var/lib/containers/storage/overlay/acc235568d5ff1570e05cad10c8716d6f325a02e28f068a1c35e5f616b2837a2/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 17:22:01 UTC; 51min ago
      Until: Mon 2026-02-02 17:22:01 UTC; 51min ago
      Where: /var/lib/containers/storage/overlay/acc235568d5ff1570e05cad10c8716d6f325a02e28f068a1c35e5f616b2837a2/merged
       What: overlay

● var-lib-containers-storage-overlay-d5dc665d5b607e11e5261cb0bf495273dbf4c1b133126a1378805e20594ccaad-merged.mount - /var/lib/containers/storage/overlay/d5dc665d5b607e11e5261cb0bf495273dbf4c1b133126a1378805e20594ccaad/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 17:20:18 UTC; 53min ago
      Until: Mon 2026-02-02 17:20:18 UTC; 53min ago
      Where: /var/lib/containers/storage/overlay/d5dc665d5b607e11e5261cb0bf495273dbf4c1b133126a1378805e20594ccaad/merged
       What: overlay

● var-lib-containers-storage-overlay-dc4de35ad4e032854b890938b202cbce85adbe4d8ddcb341dbb469a9938ddbc5-merged.mount - /var/lib/containers/storage/overlay/dc4de35ad4e032854b890938b202cbce85adbe4d8ddcb341dbb469a9938ddbc5/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 17:29:46 UTC; 43min ago
      Until: Mon 2026-02-02 17:29:46 UTC; 43min ago
      Where: /var/lib/containers/storage/overlay/dc4de35ad4e032854b890938b202cbce85adbe4d8ddcb341dbb469a9938ddbc5/merged
       What: overlay

● var-lib-containers-storage-overlay.mount - /var/lib/containers/storage/overlay
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 17:20:18 UTC; 53min ago
      Until: Mon 2026-02-02 17:20:18 UTC; 53min ago
      Where: /var/lib/containers/storage/overlay
       What: /dev/vda1

● var-lib-containers-storage-overlay\x2dcontainers-17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 17:37:21 UTC; 36min ago
      Until: Mon 2026-02-02 17:37:21 UTC; 36min ago
      Where: /var/lib/containers/storage/overlay-containers/17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 17:29:46 UTC; 43min ago
      Until: Mon 2026-02-02 17:29:46 UTC; 43min ago
      Where: /var/lib/containers/storage/overlay-containers/70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 17:30:44 UTC; 42min ago
      Until: Mon 2026-02-02 17:30:44 UTC; 42min ago
      Where: /var/lib/containers/storage/overlay-containers/983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b/userdata/shm
       What: shm

○ var-lib-machines.mount - Virtual Machine and Container Storage (Compatibility)
     Loaded: loaded (/usr/lib/systemd/system/var-lib-machines.mount; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 17:34:24 UTC; 38min ago
      Where: /var/lib/machines
       What: /var/lib/machines.raw

Feb 02 17:34:24 compute-0 systemd[1]: Virtual Machine and Container Storage (Compatibility) was skipped because of an unmet condition check (ConditionPathExists=/var/lib/machines.raw).

● var-lib-nfs-rpc_pipefs.mount - RPC Pipe File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Where: /var/lib/nfs/rpc_pipefs
       What: rpc_pipefs

● systemd-ask-password-console.path - Dispatch Password Requests to Console Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.path; static)
     Active: active (waiting) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
   Triggers: ● systemd-ask-password-console.service
       Docs: man:systemd-ask-password-console.path(8)

● systemd-ask-password-wall.path - Forward Password Requests to Wall Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.path; static)
     Active: active (waiting) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
   Triggers: ● systemd-ask-password-wall.service
       Docs: man:systemd-ask-password-wall.path(8)

● init.scope - System and Service Manager
     Loaded: loaded
  Transient: yes
     Active: active (running) since Mon 2026-02-02 16:45:59 UTC; 1h 27min ago
       Docs: man:systemd(1)
         IO: 1.1M read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 48.0M (peak: 67.2M)
        CPU: 1min 706ms
     CGroup: /init.scope
             └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31

Feb 02 18:13:12 compute-0 systemd[1]: Started libcrun container.
Feb 02 18:13:12 compute-0 systemd[1]: libpod-32082b334d08a6d5ab5daf434d029569f670f60ee0b0f99762b8ec4bb1548a07.scope: Deactivated successfully.
Feb 02 18:13:12 compute-0 systemd[1]: var-lib-containers-storage-overlay-e3bb365c1ad1b8e2c8dd8f20c75b00a6fbc7872df31f8b4a252e80a1ccbd1da7-merged.mount: Deactivated successfully.
Feb 02 18:13:12 compute-0 systemd[1]: libpod-conmon-32082b334d08a6d5ab5daf434d029569f670f60ee0b0f99762b8ec4bb1548a07.scope: Deactivated successfully.
Feb 02 18:13:12 compute-0 systemd[1]: Started libpod-conmon-f04134b56be7ae51a2491dd83e89562165b2581d3a93a5029ed331cdcb914e4e.scope.
Feb 02 18:13:12 compute-0 systemd[1]: Started libcrun container.
Feb 02 18:13:13 compute-0 systemd[1]: libpod-f04134b56be7ae51a2491dd83e89562165b2581d3a93a5029ed331cdcb914e4e.scope: Deactivated successfully.
Feb 02 18:13:13 compute-0 systemd[1]: libpod-f04134b56be7ae51a2491dd83e89562165b2581d3a93a5029ed331cdcb914e4e.scope: Consumed 1.199s CPU time.
Feb 02 18:13:13 compute-0 systemd[1]: var-lib-containers-storage-overlay-cad04384d7d78b50320d0896bfe81ac6c3c4299bb9d54a3cc4c94015227deb0d-merged.mount: Deactivated successfully.
Feb 02 18:13:13 compute-0 systemd[1]: libpod-conmon-f04134b56be7ae51a2491dd83e89562165b2581d3a93a5029ed331cdcb914e4e.scope: Deactivated successfully.

● libpod-17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 17:37:21 UTC; 36min ago
         IO: 37.4M read, 41.7M written
      Tasks: 27 (limit: 4096)
     Memory: 425.4M (peak: 524.7M)
        CPU: 2min 29.392s
     CGroup: /machine.slice/libpod-17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681.scope
             └─container
               ├─239848 dumb-init --single-child -- kolla_start
               ├─239853 /usr/bin/python3 /usr/bin/nova-compute
               ├─246527 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmp70ca1rbh/privsep.sock
               └─249256 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmp9qdvacqx/privsep.sock

Feb 02 17:37:21 compute-0 systemd[1]: Started libcrun container.

● libpod-70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783.scope.d
             └─dep.conf
     Active: active (running) since Mon 2026-02-02 17:29:46 UTC; 43min ago
         IO: 6.1M read, 4.0K written
      Tasks: 6 (limit: 4096)
     Memory: 18.8M (peak: 25.1M)
        CPU: 6.120s
     CGroup: /machine.slice/libpod-70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783.scope
             └─container
               ├─146043 dumb-init --single-child -- kolla_start
               └─146046 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt

Feb 02 17:29:46 compute-0 systemd[1]: Started libcrun container.

● libpod-983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b.scope.d
             └─dep.conf
     Active: active (running) since Mon 2026-02-02 17:30:44 UTC; 42min ago
         IO: 17.8M read, 14.2M written
      Tasks: 11 (limit: 4096)
     Memory: 436.7M (peak: 485.5M)
        CPU: 36.400s
     CGroup: /machine.slice/libpod-983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b.scope
             └─container
               ├─155388 dumb-init --single-child -- kolla_start
               ├─155391 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─155696 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─155891 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpty0fqf9d/privsep.sock
               ├─246686 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpx1bw3qnr/privsep.sock
               └─246720 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmp037046cp/privsep.sock

Feb 02 18:00:26 compute-0 podman[271516]: 2026-02-02 18:00:26.927918818 +0000 UTC m=+0.054511526 container died 9de86b0c86793bedd1e4879184ea7f50bdcebde57324cbba35cf4ea71311bbd8 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-82a7f311-fed2-4a09-8203-270dceb25c76, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, tcib_managed=true, org.label-schema.build-date=20260127)
Feb 02 18:00:26 compute-0 podman[271516]: 2026-02-02 18:00:26.995015048 +0000 UTC m=+0.121607736 container cleanup 9de86b0c86793bedd1e4879184ea7f50bdcebde57324cbba35cf4ea71311bbd8 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-82a7f311-fed2-4a09-8203-270dceb25c76, org.label-schema.schema-version=1.0, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260127, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Feb 02 18:00:27 compute-0 podman[271573]: 2026-02-02 18:00:27.054124573 +0000 UTC m=+0.040979225 container remove 9de86b0c86793bedd1e4879184ea7f50bdcebde57324cbba35cf4ea71311bbd8 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-82a7f311-fed2-4a09-8203-270dceb25c76, org.label-schema.build-date=20260127, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image)
Feb 02 18:00:49 compute-0 podman[272138]: 2026-02-02 18:00:49.361543805 +0000 UTC m=+0.043388593 container create b630d3dc37514c6fd297f48ce077a9ea5fa195391bfacc2148e51ccdb707daa9 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-967fc097-5eb9-45d1-9d27-cd16a27cb74e, org.label-schema.license=GPLv2, org.label-schema.build-date=20260127, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, io.buildah.version=1.41.3)
Feb 02 18:00:49 compute-0 podman[272138]: 2026-02-02 18:00:49.336561142 +0000 UTC m=+0.018405970 image pull 19964fda6b912d3d57e21b0bcc221725d936e513025030cb508474fe04b06af8 quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified
Feb 02 18:00:49 compute-0 podman[272138]: 2026-02-02 18:00:49.476605936 +0000 UTC m=+0.158450754 container init b630d3dc37514c6fd297f48ce077a9ea5fa195391bfacc2148e51ccdb707daa9 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-967fc097-5eb9-45d1-9d27-cd16a27cb74e, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, tcib_managed=true, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260127, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2)
Feb 02 18:00:49 compute-0 podman[272138]: 2026-02-02 18:00:49.483715226 +0000 UTC m=+0.165560014 container start b630d3dc37514c6fd297f48ce077a9ea5fa195391bfacc2148e51ccdb707daa9 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-967fc097-5eb9-45d1-9d27-cd16a27cb74e, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, org.label-schema.schema-version=1.0, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260127)
Feb 02 18:01:55 compute-0 podman[273075]: 2026-02-02 18:01:55.577983536 +0000 UTC m=+0.051743325 container died b630d3dc37514c6fd297f48ce077a9ea5fa195391bfacc2148e51ccdb707daa9 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-967fc097-5eb9-45d1-9d27-cd16a27cb74e, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260127)
Feb 02 18:01:55 compute-0 podman[273075]: 2026-02-02 18:01:55.614069748 +0000 UTC m=+0.087829537 container cleanup b630d3dc37514c6fd297f48ce077a9ea5fa195391bfacc2148e51ccdb707daa9 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-967fc097-5eb9-45d1-9d27-cd16a27cb74e, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260127, tcib_managed=true, org.label-schema.schema-version=1.0, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image)
Feb 02 18:01:55 compute-0 podman[273115]: 2026-02-02 18:01:55.676653859 +0000 UTC m=+0.041851626 container remove b630d3dc37514c6fd297f48ce077a9ea5fa195391bfacc2148e51ccdb707daa9 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-967fc097-5eb9-45d1-9d27-cd16a27cb74e, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260127, org.label-schema.license=GPLv2, tcib_managed=true, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4)

● session-1.scope - Session 1 of User zuul
     Loaded: loaded (/run/systemd/transient/session-1.scope; transient)
  Transient: yes
     Active: active (abandoned) since Mon 2026-02-02 16:46:29 UTC; 1h 26min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 17.1M (peak: 38.9M)
        CPU: 1min 4.205s
     CGroup: /user.slice/user-1000.slice/session-1.scope
             └─4520 /usr/bin/python3

Feb 02 16:49:13 np0005605476.novalocal sudo[7372]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Feb 02 16:49:13 np0005605476.novalocal python3[7374]: ansible-ansible.legacy.stat Invoked with path=/etc/ci/env/networking-info.yml follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Feb 02 16:49:13 np0005605476.novalocal sudo[7372]: pam_unix(sudo:session): session closed for user root
Feb 02 16:49:13 np0005605476.novalocal sudo[7445]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-eimjqkgkjwbosjatknyagoizeboxtiwm ; OS_CLOUD=vexxhost /usr/bin/python3'
Feb 02 16:49:13 np0005605476.novalocal sudo[7445]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Feb 02 16:49:13 np0005605476.novalocal python3[7447]: ansible-ansible.legacy.copy Invoked with dest=/etc/ci/env/networking-info.yml owner=root group=root mode=0644 src=/home/zuul/.ansible/tmp/ansible-tmp-1770050953.143065-267-186098056267376/source _original_basename=tmpzh8d4rkp follow=False checksum=4afc33d8796c4d0a05d3c8aff74739aae3c20214 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Feb 02 16:49:13 np0005605476.novalocal sudo[7445]: pam_unix(sudo:session): session closed for user root
Feb 02 16:50:13 np0005605476.novalocal sshd-session[4317]: Received disconnect from 38.102.83.114 port 46562:11: disconnected by user
Feb 02 16:50:13 np0005605476.novalocal sshd-session[4317]: Disconnected from user zuul 38.102.83.114 port 46562
Feb 02 16:50:13 np0005605476.novalocal sshd-session[4303]: pam_unix(sshd:session): session closed for user zuul

● session-20.scope - Session 20 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-20.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 17:20:42 UTC; 52min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 1.3M)
        CPU: 104ms
     CGroup: /user.slice/user-42477.slice/session-20.scope
             ├─76562 "sshd-session: ceph-admin [priv]"
             └─76584 "sshd-session: ceph-admin"

Feb 02 17:20:42 compute-0 systemd[1]: Started Session 20 of User ceph-admin.

● session-22.scope - Session 22 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-22.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 17:20:42 UTC; 52min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.1M)
        CPU: 159ms
     CGroup: /user.slice/user-42477.slice/session-22.scope
             ├─76579 "sshd-session: ceph-admin [priv]"
             └─76585 "sshd-session: ceph-admin@notty"

Feb 02 17:20:42 compute-0 systemd[1]: Started Session 22 of User ceph-admin.
Feb 02 17:20:42 compute-0 sudo[76586]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Feb 02 17:20:42 compute-0 sudo[76586]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 17:20:42 compute-0 sudo[76586]: pam_unix(sudo:session): session closed for user root

● session-23.scope - Session 23 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-23.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 17:20:42 UTC; 52min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 6.1M)
        CPU: 146ms
     CGroup: /user.slice/user-42477.slice/session-23.scope
             ├─76611 "sshd-session: ceph-admin [priv]"
             └─76614 "sshd-session: ceph-admin@notty"

Feb 02 17:20:42 compute-0 systemd[1]: Started Session 23 of User ceph-admin.
Feb 02 17:20:42 compute-0 sudo[76615]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/eb48d0ef-3496-563c-b73d-661fb962013e/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b --timeout 895 check-host --expect-hostname compute-0
Feb 02 17:20:42 compute-0 sudo[76615]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 17:20:42 compute-0 sudo[76615]: pam_unix(sudo:session): session closed for user root

● session-24.scope - Session 24 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-24.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 17:20:43 UTC; 52min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.1M)
        CPU: 135ms
     CGroup: /user.slice/user-42477.slice/session-24.scope
             ├─76640 "sshd-session: ceph-admin [priv]"
             └─76643 "sshd-session: ceph-admin@notty"

Feb 02 17:20:43 compute-0 systemd[1]: Started Session 24 of User ceph-admin.
Feb 02 17:20:43 compute-0 sudo[76644]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /var/lib/ceph/eb48d0ef-3496-563c-b73d-661fb962013e/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b
Feb 02 17:20:43 compute-0 sudo[76644]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 17:20:43 compute-0 sudo[76644]: pam_unix(sudo:session): session closed for user root

● session-25.scope - Session 25 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-25.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 17:20:43 UTC; 52min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.6M)
        CPU: 122ms
     CGroup: /user.slice/user-42477.slice/session-25.scope
             ├─76669 "sshd-session: ceph-admin [priv]"
             └─76672 "sshd-session: ceph-admin@notty"

Feb 02 17:20:43 compute-0 systemd[1]: Started Session 25 of User ceph-admin.
Feb 02 17:20:43 compute-0 sudo[76673]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /var/lib/ceph/eb48d0ef-3496-563c-b73d-661fb962013e
Feb 02 17:20:43 compute-0 sudo[76673]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 17:20:43 compute-0 sudo[76673]: pam_unix(sudo:session): session closed for user root

● session-26.scope - Session 26 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-26.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 17:20:43 UTC; 52min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.1M)
        CPU: 131ms
     CGroup: /user.slice/user-42477.slice/session-26.scope
             ├─76698 "sshd-session: ceph-admin [priv]"
             └─76701 "sshd-session: ceph-admin@notty"

Feb 02 17:20:43 compute-0 systemd[1]: Started Session 26 of User ceph-admin.
Feb 02 17:20:43 compute-0 sudo[76702]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /tmp/cephadm-eb48d0ef-3496-563c-b73d-661fb962013e/var/lib/ceph/eb48d0ef-3496-563c-b73d-661fb962013e
Feb 02 17:20:43 compute-0 sudo[76702]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 17:20:43 compute-0 sudo[76702]: pam_unix(sudo:session): session closed for user root

● session-27.scope - Session 27 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-27.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 17:20:44 UTC; 52min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 3.9M)
        CPU: 151ms
     CGroup: /user.slice/user-42477.slice/session-27.scope
             ├─76727 "sshd-session: ceph-admin [priv]"
             └─76730 "sshd-session: ceph-admin@notty"

Feb 02 17:20:44 compute-0 systemd[1]: Started Session 27 of User ceph-admin.
Feb 02 17:20:44 compute-0 sudo[76731]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/touch /tmp/cephadm-eb48d0ef-3496-563c-b73d-661fb962013e/var/lib/ceph/eb48d0ef-3496-563c-b73d-661fb962013e/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b.new
Feb 02 17:20:44 compute-0 sudo[76731]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 17:20:44 compute-0 sudo[76731]: pam_unix(sudo:session): session closed for user root

● session-28.scope - Session 28 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-28.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 17:20:44 UTC; 52min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.3M)
        CPU: 131ms
     CGroup: /user.slice/user-42477.slice/session-28.scope
             ├─76756 "sshd-session: ceph-admin [priv]"
             └─76759 "sshd-session: ceph-admin@notty"

Feb 02 17:20:44 compute-0 systemd[1]: Started Session 28 of User ceph-admin.
Feb 02 17:20:44 compute-0 sudo[76760]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chown -R ceph-admin /tmp/cephadm-eb48d0ef-3496-563c-b73d-661fb962013e
Feb 02 17:20:44 compute-0 sudo[76760]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 17:20:44 compute-0 sudo[76760]: pam_unix(sudo:session): session closed for user root

● session-29.scope - Session 29 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-29.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 17:20:44 UTC; 52min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.1M)
        CPU: 142ms
     CGroup: /user.slice/user-42477.slice/session-29.scope
             ├─76785 "sshd-session: ceph-admin [priv]"
             └─76788 "sshd-session: ceph-admin@notty"

Feb 02 17:20:44 compute-0 systemd[1]: Started Session 29 of User ceph-admin.
Feb 02 17:20:44 compute-0 sudo[76789]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chmod 644 /tmp/cephadm-eb48d0ef-3496-563c-b73d-661fb962013e/var/lib/ceph/eb48d0ef-3496-563c-b73d-661fb962013e/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b.new
Feb 02 17:20:44 compute-0 sudo[76789]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 17:20:44 compute-0 sudo[76789]: pam_unix(sudo:session): session closed for user root

● session-30.scope - Session 30 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-30.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 17:20:45 UTC; 52min ago
         IO: 0B read, 1016.0K written
      Tasks: 2
     Memory: 2.2M (peak: 3.6M)
        CPU: 155ms
     CGroup: /user.slice/user-42477.slice/session-30.scope
             ├─76814 "sshd-session: ceph-admin [priv]"
             └─76817 "sshd-session: ceph-admin@notty"

Feb 02 17:20:45 compute-0 systemd[1]: Started Session 30 of User ceph-admin.

● session-31.scope - Session 31 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-31.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 17:20:46 UTC; 52min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 5.7M)
        CPU: 162ms
     CGroup: /user.slice/user-42477.slice/session-31.scope
             ├─76841 "sshd-session: ceph-admin [priv]"
             └─76844 "sshd-session: ceph-admin@notty"

Feb 02 17:20:46 compute-0 systemd[1]: Started Session 31 of User ceph-admin.
Feb 02 17:20:46 compute-0 sudo[76845]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mv -Z /tmp/cephadm-eb48d0ef-3496-563c-b73d-661fb962013e/var/lib/ceph/eb48d0ef-3496-563c-b73d-661fb962013e/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b.new /var/lib/ceph/eb48d0ef-3496-563c-b73d-661fb962013e/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b
Feb 02 17:20:46 compute-0 sudo[76845]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 17:20:46 compute-0 sudo[76845]: pam_unix(sudo:session): session closed for user root

● session-32.scope - Session 32 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-32.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 17:20:46 UTC; 52min ago
         IO: 180.0K read, 190.4M written
      Tasks: 2
     Memory: 5.0M (peak: 55.0M)
        CPU: 3min 1.481s
     CGroup: /user.slice/user-42477.slice/session-32.scope
             ├─76870 "sshd-session: ceph-admin [priv]"
             └─76873 "sshd-session: ceph-admin@notty"

Feb 02 18:13:12 compute-0 podman[291988]: 2026-02-02 18:13:12.539263483 +0000 UTC m=+0.025413827 image pull 524f3da276461682bec27427fb8a63b5139c40ad4185939aede197474a6817b3 quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86
Feb 02 18:13:12 compute-0 podman[291988]: 2026-02-02 18:13:12.650569939 +0000 UTC m=+0.136720283 container init f04134b56be7ae51a2491dd83e89562165b2581d3a93a5029ed331cdcb914e4e (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=brave_lederberg, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=tentacle, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, ceph=True, org.label-schema.build-date=20251030, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9)
Feb 02 18:13:12 compute-0 podman[291988]: 2026-02-02 18:13:12.657800372 +0000 UTC m=+0.143950686 container start f04134b56be7ae51a2491dd83e89562165b2581d3a93a5029ed331cdcb914e4e (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=brave_lederberg, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=tentacle, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, io.buildah.version=1.41.3, org.label-schema.build-date=20251030, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Feb 02 18:13:12 compute-0 podman[291988]: 2026-02-02 18:13:12.662128484 +0000 UTC m=+0.148278818 container attach f04134b56be7ae51a2491dd83e89562165b2581d3a93a5029ed331cdcb914e4e (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=brave_lederberg, CEPH_REF=tentacle, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20251030, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.41.3)
Feb 02 18:13:13 compute-0 podman[291988]: 2026-02-02 18:13:13.469816948 +0000 UTC m=+0.955967282 container died f04134b56be7ae51a2491dd83e89562165b2581d3a93a5029ed331cdcb914e4e (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=brave_lederberg, CEPH_REF=tentacle, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20251030, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Feb 02 18:13:13 compute-0 podman[291988]: 2026-02-02 18:13:13.531255448 +0000 UTC m=+1.017405772 container remove f04134b56be7ae51a2491dd83e89562165b2581d3a93a5029ed331cdcb914e4e (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=brave_lederberg, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, io.buildah.version=1.41.3, ceph=True, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=tentacle, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20251030, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/)
Feb 02 18:13:13 compute-0 sudo[291861]: pam_unix(sudo:session): session closed for user root
Feb 02 18:13:13 compute-0 sudo[292316]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Feb 02 18:13:13 compute-0 sudo[292316]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 18:13:13 compute-0 sudo[292316]: pam_unix(sudo:session): session closed for user root

● session-54.scope - Session 54 of User zuul
     Loaded: loaded (/run/systemd/transient/session-54.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 18:12:40 UTC; 42s ago
         IO: 6.3M read, 335.2M written
      Tasks: 31
     Memory: 490.8M (peak: 532.2M)
        CPU: 2min 7.763s
     CGroup: /user.slice/user-1000.slice/session-54.scope
             ├─287794 "sshd-session: zuul [priv]"
             ├─287797 "sshd-session: zuul@notty"
             ├─287798 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             ├─287822 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             ├─294026 timeout 15s turbostat --debug sleep 10
             ├─294374 timeout 300s systemctl status --all
             ├─294375 systemctl status --all
             ├─294376 timeout 300s ceph mds stat --format json-pretty
             ├─294377 /usr/bin/python3 -s /usr/bin/ceph mds stat --format json-pretty
             ├─294397 timeout 300s semanage boolean -l
             └─294398 /usr/bin/python3 -EsI /usr/sbin/semanage boolean -l

Feb 02 18:12:40 compute-0 systemd[1]: Started Session 54 of User zuul.
Feb 02 18:12:40 compute-0 sudo[287798]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Feb 02 18:12:40 compute-0 sudo[287798]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Feb 02 18:13:16 compute-0 ovs-appctl[293162]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Feb 02 18:13:16 compute-0 ovs-appctl[293166]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Feb 02 18:13:16 compute-0 ovs-appctl[293171]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

○ 70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783-2c712ae2bc69d601.service - /usr/bin/podman healthcheck run 70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783
     Loaded: loaded (/run/systemd/transient/70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783-2c712ae2bc69d601.service; transient)
  Transient: yes
     Active: inactive (dead) since Mon 2026-02-02 18:13:22 UTC; 777ms ago
   Duration: 84ms
TriggeredBy: ● 70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783-2c712ae2bc69d601.timer
    Process: 294309 ExecStart=/usr/bin/podman healthcheck run 70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783 (code=exited, status=0/SUCCESS)
   Main PID: 294309 (code=exited, status=0/SUCCESS)
        CPU: 65ms

Feb 02 18:13:22 compute-0 podman[294309]: 2026-02-02 18:13:22.893892451 +0000 UTC m=+0.066878965 container health_status 70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, tcib_managed=true, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': 'fb44d116753823076754339ecdff5d26c5c02250617a2157b9bf22160a92362b-b679b18dd4e53db9e352e8eb6b265beb4b106035d3e3bfb3cb99fdf41954fcec-b679b18dd4e53db9e352e8eb6b265beb4b106035d3e3bfb3cb99fdf41954fcec-b679b18dd4e53db9e352e8eb6b265beb4b106035d3e3bfb3cb99fdf41954fcec'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.vendor=CentOS, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, 
managed_by=edpm_ansible, org.label-schema.build-date=20260127)

○ 983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b-17f04868bd9a498.service - /usr/bin/podman healthcheck run 983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b
     Loaded: loaded (/run/systemd/transient/983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b-17f04868bd9a498.service; transient)
  Transient: yes
     Active: inactive (dead) since Mon 2026-02-02 18:13:20 UTC; 3s ago
   Duration: 76ms
TriggeredBy: ● 983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b-17f04868bd9a498.timer
    Process: 294136 ExecStart=/usr/bin/podman healthcheck run 983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b (code=exited, status=0/SUCCESS)
   Main PID: 294136 (code=exited, status=0/SUCCESS)
        CPU: 80ms

Feb 02 18:13:20 compute-0 podman[294136]: 2026-02-02 18:13:20.611048372 +0000 UTC m=+0.057621445 container health_status 983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.build-date=20260127, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': 'fb44d116753823076754339ecdff5d26c5c02250617a2157b9bf22160a92362b-b679b18dd4e53db9e352e8eb6b265beb4b106035d3e3bfb3cb99fdf41954fcec-b679b18dd4e53db9e352e8eb6b265beb4b106035d3e3bfb3cb99fdf41954fcec-b679b18dd4e53db9e352e8eb6b265beb4b106035d3e3bfb3cb99fdf41954fcec-0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/openstack/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Unit apparmor.service could not be found.
Unit apt-daily.service could not be found.
Unit auto-cpufreq.service could not be found.
Unit autofs.service could not be found.

● auditd.service - Security Auditing Service
     Loaded: loaded (/usr/lib/systemd/system/auditd.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
       Docs: man:auditd(8)
             https://github.com/linux-audit/audit-documentation
   Main PID: 704 (auditd)
         IO: 0B read, 23.8M written
      Tasks: 4 (limit: 48560)
     Memory: 15.5M (peak: 16.0M)
        CPU: 4.838s
     CGroup: /system.slice/auditd.service
             ├─704 /sbin/auditd
             └─706 /usr/sbin/sedispatch

Feb 02 16:46:03 localhost augenrules[724]: failure 1
Feb 02 16:46:03 localhost augenrules[724]: pid 704
Feb 02 16:46:03 localhost augenrules[724]: rate_limit 0
Feb 02 16:46:03 localhost augenrules[724]: backlog_limit 8192
Feb 02 16:46:03 localhost augenrules[724]: lost 0
Feb 02 16:46:03 localhost augenrules[724]: backlog 4
Feb 02 16:46:03 localhost augenrules[724]: backlog_wait_time 60000
Feb 02 16:46:03 localhost augenrules[724]: backlog_wait_time_actual 0
Feb 02 16:46:03 localhost systemd[1]: Started Security Auditing Service.
Feb 02 17:34:11 compute-0 auditd[704]: Audit daemon rotating log files

○ auth-rpcgss-module.service - Kernel Module supporting RPCSEC_GSS
     Loaded: loaded (/usr/lib/systemd/system/auth-rpcgss-module.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 16:46:02 UTC; 1h 27min ago

○ blk-availability.service - Availability of block devices
     Loaded: loaded (/usr/lib/systemd/system/blk-availability.service; disabled; preset: disabled)
     Active: inactive (dead)

● ceph-eb48d0ef-3496-563c-b73d-661fb962013e@crash.compute-0.service - Ceph crash.compute-0 for eb48d0ef-3496-563c-b73d-661fb962013e
     Loaded: loaded (/etc/systemd/system/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 17:20:55 UTC; 52min ago
   Main PID: 80150 (conmon)
         IO: 0B read, 174.0K written
      Tasks: 3 (limit: 48560)
     Memory: 7.7M (peak: 29.2M)
        CPU: 378ms
     CGroup: /system.slice/system-ceph\x2deb48d0ef\x2d3496\x2d563c\x2db73d\x2d661fb962013e.slice/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@crash.compute-0.service
             ├─libpod-payload-43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b
             │ ├─80152 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ └─80154 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             └─runtime
               └─80150 /usr/bin/conmon --api-version 1 -c 43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b -u 43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b/userdata -p /run/containers/storage/overlay-containers/43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b

Feb 02 17:20:55 compute-0 systemd[1]: Started Ceph crash.compute-0 for eb48d0ef-3496-563c-b73d-661fb962013e.
Feb 02 17:20:55 compute-0 ceph-eb48d0ef-3496-563c-b73d-661fb962013e-crash-compute-0[80150]: INFO:ceph-crash:pinging cluster to exercise our key
Feb 02 17:20:55 compute-0 ceph-eb48d0ef-3496-563c-b73d-661fb962013e-crash-compute-0[80150]: 2026-02-02T17:20:55.928+0000 7fdd03515640 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory
Feb 02 17:20:55 compute-0 ceph-eb48d0ef-3496-563c-b73d-661fb962013e-crash-compute-0[80150]: 2026-02-02T17:20:55.928+0000 7fdd03515640 -1 AuthRegistry(0x7fdcfc052d90) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx
Feb 02 17:20:55 compute-0 ceph-eb48d0ef-3496-563c-b73d-661fb962013e-crash-compute-0[80150]: 2026-02-02T17:20:55.929+0000 7fdd03515640 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory
Feb 02 17:20:55 compute-0 ceph-eb48d0ef-3496-563c-b73d-661fb962013e-crash-compute-0[80150]: 2026-02-02T17:20:55.929+0000 7fdd03515640 -1 AuthRegistry(0x7fdd03513fe0) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx
Feb 02 17:20:55 compute-0 ceph-eb48d0ef-3496-563c-b73d-661fb962013e-crash-compute-0[80150]: 2026-02-02T17:20:55.929+0000 7fdd0128a640 -1 monclient(hunting): handle_auth_bad_method server allowed_methods [2] but i only support [1]
Feb 02 17:20:55 compute-0 ceph-eb48d0ef-3496-563c-b73d-661fb962013e-crash-compute-0[80150]: 2026-02-02T17:20:55.929+0000 7fdd03515640 -1 monclient: authenticate NOTE: no keyring found; disabled cephx authentication
Feb 02 17:20:55 compute-0 ceph-eb48d0ef-3496-563c-b73d-661fb962013e-crash-compute-0[80150]: [errno 13] RADOS permission denied (error connecting to the cluster)
Feb 02 17:20:55 compute-0 ceph-eb48d0ef-3496-563c-b73d-661fb962013e-crash-compute-0[80150]: INFO:ceph-crash:monitoring path /var/lib/ceph/crash, delay 600s

● ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mds.cephfs.compute-0.vvdoei.service - Ceph mds.cephfs.compute-0.vvdoei for eb48d0ef-3496-563c-b73d-661fb962013e
     Loaded: loaded (/etc/systemd/system/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 17:22:03 UTC; 51min ago
   Main PID: 95610 (conmon)
         IO: 0B read, 874.0K written
      Tasks: 31 (limit: 48560)
     Memory: 28.6M (peak: 29.4M)
        CPU: 4.991s
     CGroup: /system.slice/system-ceph\x2deb48d0ef\x2d3496\x2d563c\x2db73d\x2d661fb962013e.slice/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mds.cephfs.compute-0.vvdoei.service
             ├─libpod-payload-30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56
             │ ├─95612 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.vvdoei -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─95614 /usr/bin/ceph-mds -n mds.cephfs.compute-0.vvdoei -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─95610 /usr/bin/conmon --api-version 1 -c 30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56 -u 30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56/userdata -p /run/containers/storage/overlay-containers/30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-mds-cephfs-compute-0-vvdoei --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mds.cephfs.compute-0.vvdoei.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56

Feb 02 18:12:49 compute-0 ceph-mds[95614]: mds.cephfs.compute-0.vvdoei asok_command: dump loads {prefix=dump loads} (starting...)
Feb 02 18:12:49 compute-0 ceph-mds[95614]: mds.cephfs.compute-0.vvdoei asok_command: dump tree {prefix=dump tree,root=/} (starting...)
Feb 02 18:12:49 compute-0 ceph-mds[95614]: mds.cephfs.compute-0.vvdoei asok_command: dump_blocked_ops {prefix=dump_blocked_ops} (starting...)
Feb 02 18:12:49 compute-0 ceph-mds[95614]: mds.cephfs.compute-0.vvdoei asok_command: dump_historic_ops {prefix=dump_historic_ops} (starting...)
Feb 02 18:12:49 compute-0 ceph-mds[95614]: mds.cephfs.compute-0.vvdoei asok_command: dump_historic_ops_by_duration {prefix=dump_historic_ops_by_duration} (starting...)
Feb 02 18:12:49 compute-0 ceph-mds[95614]: mds.cephfs.compute-0.vvdoei asok_command: dump_ops_in_flight {prefix=dump_ops_in_flight} (starting...)
Feb 02 18:12:50 compute-0 ceph-mds[95614]: mds.cephfs.compute-0.vvdoei asok_command: get subtrees {prefix=get subtrees} (starting...)
Feb 02 18:12:50 compute-0 ceph-mds[95614]: mds.cephfs.compute-0.vvdoei asok_command: ops {prefix=ops} (starting...)
Feb 02 18:12:50 compute-0 ceph-mds[95614]: mds.cephfs.compute-0.vvdoei asok_command: session ls {prefix=session ls} (starting...)
Feb 02 18:12:51 compute-0 ceph-mds[95614]: mds.cephfs.compute-0.vvdoei asok_command: status {prefix=status} (starting...)

● ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mgr.compute-0.hccdnu.service - Ceph mgr.compute-0.hccdnu for eb48d0ef-3496-563c-b73d-661fb962013e
     Loaded: loaded (/etc/systemd/system/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 17:20:21 UTC; 53min ago
   Main PID: 75489 (conmon)
         IO: 0B read, 2.9M written
      Tasks: 144 (limit: 48560)
     Memory: 529.1M (peak: 530.0M)
        CPU: 56.081s
     CGroup: /system.slice/system-ceph\x2deb48d0ef\x2d3496\x2d563c\x2db73d\x2d661fb962013e.slice/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mgr.compute-0.hccdnu.service
             ├─libpod-payload-f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c
             │ ├─75491 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.hccdnu -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─75493 /usr/bin/ceph-mgr -n mgr.compute-0.hccdnu -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─75489 /usr/bin/conmon --api-version 1 -c f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c -u f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c/userdata -p /run/containers/storage/overlay-containers/f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-mgr-compute-0-hccdnu --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mgr.compute-0.hccdnu.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c

Feb 02 18:13:15 compute-0 ceph-mgr[75493]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Feb 02 18:13:15 compute-0 ceph-mgr[75493]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Feb 02 18:13:16 compute-0 ceph-mgr[75493]: log_channel(cluster) log [DBG] : pgmap v2043: 305 pgs: 305 active+clean; 271 MiB data, 632 MiB used, 59 GiB / 60 GiB avail
Feb 02 18:13:17 compute-0 ceph-mgr[75493]: log_channel(audit) log [DBG] : from='client.19598 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""]}]: dispatch
Feb 02 18:13:17 compute-0 ceph-mgr[75493]: log_channel(audit) log [DBG] : from='client.19600 -' entity='client.admin' cmd=[{"prefix": "pg stat", "target": ["mon-mgr", ""]}]: dispatch
Feb 02 18:13:18 compute-0 ceph-mgr[75493]: log_channel(cluster) log [DBG] : pgmap v2044: 305 pgs: 305 active+clean; 271 MiB data, 632 MiB used, 59 GiB / 60 GiB avail
Feb 02 18:13:20 compute-0 ceph-mgr[75493]: log_channel(cluster) log [DBG] : pgmap v2045: 305 pgs: 305 active+clean; 271 MiB data, 632 MiB used, 59 GiB / 60 GiB avail
Feb 02 18:13:20 compute-0 ceph-mgr[75493]: log_channel(audit) log [DBG] : from='client.19608 -' entity='client.admin' cmd=[{"prefix": "device ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Feb 02 18:13:22 compute-0 ceph-mgr[75493]: log_channel(cluster) log [DBG] : pgmap v2046: 305 pgs: 305 active+clean; 271 MiB data, 632 MiB used, 59 GiB / 60 GiB avail
Feb 02 18:13:23 compute-0 ceph-mgr[75493]: log_channel(audit) log [DBG] : from='client.19618 -' entity='client.admin' cmd=[{"prefix": "fs status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch

● ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mon.compute-0.service - Ceph mon.compute-0 for eb48d0ef-3496-563c-b73d-661fb962013e
     Loaded: loaded (/etc/systemd/system/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 17:20:18 UTC; 53min ago
   Main PID: 75193 (conmon)
         IO: 2.0M read, 417.8M written
      Tasks: 27 (limit: 48560)
     Memory: 104.1M (peak: 118.7M)
        CPU: 42.110s
     CGroup: /system.slice/system-ceph\x2deb48d0ef\x2d3496\x2d563c\x2db73d\x2d661fb962013e.slice/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mon.compute-0.service
             ├─libpod-payload-49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26
             │ ├─75195 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─75197 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             └─runtime
               └─75193 /usr/bin/conmon --api-version 1 -c 49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26 -u 49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26/userdata -p /run/containers/storage/overlay-containers/49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26

Feb 02 18:13:22 compute-0 ceph-mon[75197]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "fs dump", "format": "json-pretty"} v 0)
Feb 02 18:13:22 compute-0 ceph-mon[75197]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1767462686' entity='client.admin' cmd={"prefix": "fs dump", "format": "json-pretty"} : dispatch
Feb 02 18:13:22 compute-0 ceph-mon[75197]: from='client.19608 -' entity='client.admin' cmd=[{"prefix": "device ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Feb 02 18:13:22 compute-0 ceph-mon[75197]: from='client.? 192.168.122.100:0/4176112843' entity='client.admin' cmd={"prefix": "df", "format": "json-pretty"} : dispatch
Feb 02 18:13:22 compute-0 ceph-mon[75197]: from='client.? 192.168.122.100:0/1767462686' entity='client.admin' cmd={"prefix": "fs dump", "format": "json-pretty"} : dispatch
Feb 02 18:13:22 compute-0 ceph-mon[75197]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "fs ls", "format": "json-pretty"} v 0)
Feb 02 18:13:22 compute-0 ceph-mon[75197]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2436103772' entity='client.admin' cmd={"prefix": "fs ls", "format": "json-pretty"} : dispatch
Feb 02 18:13:23 compute-0 ceph-mon[75197]: pgmap v2046: 305 pgs: 305 active+clean; 271 MiB data, 632 MiB used, 59 GiB / 60 GiB avail
Feb 02 18:13:23 compute-0 ceph-mon[75197]: from='client.? 192.168.122.100:0/2436103772' entity='client.admin' cmd={"prefix": "fs ls", "format": "json-pretty"} : dispatch
Feb 02 18:13:23 compute-0 ceph-mon[75197]: from='client.19618 -' entity='client.admin' cmd=[{"prefix": "fs status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch

● ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.0.service - Ceph osd.0 for eb48d0ef-3496-563c-b73d-661fb962013e
     Loaded: loaded (/etc/systemd/system/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 17:21:15 UTC; 52min ago
   Main PID: 85692 (conmon)
         IO: 554.2M read, 8.4G written
      Tasks: 61 (limit: 48560)
     Memory: 1.0G (peak: 1.4G)
        CPU: 44.564s
     CGroup: /system.slice/system-ceph\x2deb48d0ef\x2d3496\x2d563c\x2db73d\x2d661fb962013e.slice/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.0.service
             ├─libpod-payload-5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f
             │ ├─85694 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ └─85696 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             └─runtime
               └─85692 /usr/bin/conmon --api-version 1 -c 5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f -u 5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f/userdata -p /run/containers/storage/overlay-containers/5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f

Feb 02 18:13:04 compute-0 ceph-osd[85696]: prioritycache tune_memory target: 4294967296 mapped: 240820224 unmapped: 27402240 heap: 268222464 old mem: 2845415832 new mem: 2845415832
Feb 02 18:13:04 compute-0 ceph-osd[85696]: monclient: tick
Feb 02 18:13:04 compute-0 ceph-osd[85696]: monclient: _check_auth_tickets
Feb 02 18:13:04 compute-0 ceph-osd[85696]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-02-02T18:12:32.847653+0000)
Feb 02 18:13:04 compute-0 ceph-osd[85696]: prioritycache tune_memory target: 4294967296 mapped: 240820224 unmapped: 27402240 heap: 268222464 old mem: 2845415832 new mem: 2845415832
Feb 02 18:13:04 compute-0 ceph-osd[85696]: monclient: tick
Feb 02 18:13:04 compute-0 ceph-osd[85696]: monclient: _check_auth_tickets
Feb 02 18:13:04 compute-0 ceph-osd[85696]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-02-02T18:12:33.847810+0000)
Feb 02 18:13:04 compute-0 ceph-osd[85696]: prioritycache tune_memory target: 4294967296 mapped: 240623616 unmapped: 27598848 heap: 268222464 old mem: 2845415832 new mem: 2845415832
Feb 02 18:13:04 compute-0 ceph-osd[85696]: do_command 'log dump' '{prefix=log dump}'

● ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.1.service - Ceph osd.1 for eb48d0ef-3496-563c-b73d-661fb962013e
     Loaded: loaded (/etc/systemd/system/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 17:21:19 UTC; 52min ago
   Main PID: 86733 (conmon)
         IO: 600.5M read, 8.6G written
      Tasks: 61 (limit: 48560)
     Memory: 904.0M (peak: 1.1G)
        CPU: 44.652s
     CGroup: /system.slice/system-ceph\x2deb48d0ef\x2d3496\x2d563c\x2db73d\x2d661fb962013e.slice/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.1.service
             ├─libpod-payload-849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42
             │ ├─86735 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ └─86737 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             └─runtime
               └─86733 /usr/bin/conmon --api-version 1 -c 849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42 -u 849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42/userdata -p /run/containers/storage/overlay-containers/849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42

Feb 02 18:12:59 compute-0 ceph-osd[86737]: monclient: tick
Feb 02 18:12:59 compute-0 ceph-osd[86737]: monclient: _check_auth_tickets
Feb 02 18:12:59 compute-0 ceph-osd[86737]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-02-02T18:12:28.105611+0000)
Feb 02 18:12:59 compute-0 ceph-osd[86737]: prioritycache tune_memory target: 4294967296 mapped: 209960960 unmapped: 53288960 heap: 263249920 old mem: 2845415832 new mem: 2845415832
Feb 02 18:12:59 compute-0 ceph-osd[86737]: osd.1 473 heartbeat osd_stat(store_statfs(0x4f2f45000/0x0/0x4ffc00000, data 0x463b8bf/0x48c7000, compress 0x0/0x0/0x0, omap 0x87879, meta 0x8368787), peers [0,2] op hist [])
Feb 02 18:12:59 compute-0 ceph-osd[86737]: monclient: tick
Feb 02 18:12:59 compute-0 ceph-osd[86737]: monclient: _check_auth_tickets
Feb 02 18:12:59 compute-0 ceph-osd[86737]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-02-02T18:12:29.105732+0000)
Feb 02 18:12:59 compute-0 ceph-osd[86737]: prioritycache tune_memory target: 4294967296 mapped: 210083840 unmapped: 53166080 heap: 263249920 old mem: 2845415832 new mem: 2845415832
Feb 02 18:12:59 compute-0 ceph-osd[86737]: do_command 'log dump' '{prefix=log dump}'

● ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.2.service - Ceph osd.2 for eb48d0ef-3496-563c-b73d-661fb962013e
     Loaded: loaded (/etc/systemd/system/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 17:21:22 UTC; 52min ago
   Main PID: 87788 (conmon)
         IO: 561.6M read, 7.4G written
      Tasks: 61 (limit: 48560)
     Memory: 772.4M (peak: 931.0M)
        CPU: 38.494s
     CGroup: /system.slice/system-ceph\x2deb48d0ef\x2d3496\x2d563c\x2db73d\x2d661fb962013e.slice/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.2.service
             ├─libpod-payload-49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071
             │ ├─87790 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ └─87792 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             └─runtime
               └─87788 /usr/bin/conmon --api-version 1 -c 49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071 -u 49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071/userdata -p /run/containers/storage/overlay-containers/49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071

Feb 02 18:12:55 compute-0 ceph-osd[87792]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-02-02T18:12:22.659325+0000)
Feb 02 18:12:55 compute-0 ceph-osd[87792]: prioritycache tune_memory target: 4294967296 mapped: 198189056 unmapped: 39600128 heap: 237789184 old mem: 2845415832 new mem: 2845415832
Feb 02 18:12:55 compute-0 ceph-osd[87792]: monclient: tick
Feb 02 18:12:55 compute-0 ceph-osd[87792]: monclient: _check_auth_tickets
Feb 02 18:12:55 compute-0 ceph-osd[87792]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-02-02T18:12:23.659468+0000)
Feb 02 18:12:55 compute-0 ceph-osd[87792]: prioritycache tune_memory target: 4294967296 mapped: 198344704 unmapped: 39444480 heap: 237789184 old mem: 2845415832 new mem: 2845415832
Feb 02 18:12:55 compute-0 ceph-osd[87792]: monclient: tick
Feb 02 18:12:55 compute-0 ceph-osd[87792]: monclient: _check_auth_tickets
Feb 02 18:12:55 compute-0 ceph-osd[87792]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-02-02T18:12:24.659633+0000)
Feb 02 18:12:55 compute-0 ceph-osd[87792]: do_command 'log dump' '{prefix=log dump}'

● ceph-eb48d0ef-3496-563c-b73d-661fb962013e@rgw.rgw.compute-0.molmny.service - Ceph rgw.rgw.compute-0.molmny for eb48d0ef-3496-563c-b73d-661fb962013e
     Loaded: loaded (/etc/systemd/system/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 17:22:02 UTC; 51min ago
   Main PID: 95125 (conmon)
         IO: 0B read, 174.5K written
      Tasks: 614 (limit: 48560)
     Memory: 106.8M (peak: 107.4M)
        CPU: 21.906s
     CGroup: /system.slice/system-ceph\x2deb48d0ef\x2d3496\x2d563c\x2db73d\x2d661fb962013e.slice/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@rgw.rgw.compute-0.molmny.service
             ├─libpod-payload-31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e
             │ ├─95127 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.molmny -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
             │ └─95129 /usr/bin/radosgw -n client.rgw.rgw.compute-0.molmny -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
             └─runtime
               └─95125 /usr/bin/conmon --api-version 1 -c 31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e -u 31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e/userdata -p /run/containers/storage/overlay-containers/31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-rgw-rgw-compute-0-molmny --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@rgw.rgw.compute-0.molmny.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e

Feb 02 17:22:02 compute-0 radosgw[95129]: framework conf key: endpoint, val: 192.168.122.100:8082
Feb 02 17:22:02 compute-0 radosgw[95129]: init_numa not setting numa affinity
Feb 02 17:22:11 compute-0 radosgw[95129]: v1 topic migration: starting v1 topic migration..
Feb 02 17:22:11 compute-0 radosgw[95129]: v1 topic migration: finished v1 topic migration
Feb 02 17:22:11 compute-0 radosgw[95129]: framework: beast
Feb 02 17:22:11 compute-0 radosgw[95129]: framework conf key: ssl_certificate, val: config://rgw/cert/$realm/$zone.crt
Feb 02 17:22:11 compute-0 radosgw[95129]: framework conf key: ssl_private_key, val: config://rgw/cert/$realm/$zone.key
Feb 02 17:22:11 compute-0 radosgw[95129]: starting handler: beast
Feb 02 17:22:11 compute-0 radosgw[95129]: set uid:gid to 167:167 (ceph:ceph)
Feb 02 17:22:11 compute-0 radosgw[95129]: mgrc service_daemon_register rgw.14256 metadata {arch=x86_64,ceph_release=tentacle,ceph_version=ceph version 20.2.0 (69f84cc2651aa259a15bc192ddaabd3baba07489) tentacle (stable - RelWithDebInfo),ceph_version_short=20.2.0,container_hostname=compute-0,container_image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86,cpu=AMD EPYC-Rome Processor,distro=centos,distro_description=CentOS Stream 9,distro_version=9,frontend_config#0=beast endpoint=192.168.122.100:8082,frontend_type#0=beast,hostname=compute-0,id=rgw.compute-0.molmny,kernel_description=#1 SMP PREEMPT_DYNAMIC Thu Jan 22 12:30:22 UTC 2026,kernel_version=5.14.0-665.el9.x86_64,mem_swap_kb=1048572,mem_total_kb=7864288,num_handles=1,os=Linux,pid=2,realm_id=,realm_name=,zone_id=a7995faa-bb92-4914-a7cd-e36c1deac625,zone_name=default,zonegroup_id=ac71a82f-f3fa-4766-a3b5-5614c8c8b06a,zonegroup_name=default}

● ceph-osd-losetup-0.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-0.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 17:19:34 UTC; 53min ago
   Main PID: 72538 (code=exited, status=0/SUCCESS)
        CPU: 15ms

Feb 02 17:19:34 compute-0 systemd[1]: Starting Ceph OSD losetup...
Feb 02 17:19:34 compute-0 bash[72539]: /dev/loop3: [64513]:4329560 (/var/lib/ceph-osd-0.img)
Feb 02 17:19:34 compute-0 systemd[1]: Finished Ceph OSD losetup.

● ceph-osd-losetup-1.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-1.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 17:19:39 UTC; 53min ago
   Main PID: 72908 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Feb 02 17:19:38 compute-0 systemd[1]: Starting Ceph OSD losetup...
Feb 02 17:19:38 compute-0 bash[72909]: /dev/loop4: [64513]:4599419 (/var/lib/ceph-osd-1.img)
Feb 02 17:19:39 compute-0 systemd[1]: Finished Ceph OSD losetup.

● ceph-osd-losetup-2.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-2.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 17:19:42 UTC; 53min ago
   Main PID: 73276 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Feb 02 17:19:42 compute-0 systemd[1]: Starting Ceph OSD losetup...
Feb 02 17:19:42 compute-0 bash[73277]: /dev/loop5: [64513]:4642264 (/var/lib/ceph-osd-2.img)
Feb 02 17:19:42 compute-0 systemd[1]: Finished Ceph OSD losetup.

● chronyd.service - NTP client/server
     Loaded: loaded (/usr/lib/systemd/system/chronyd.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 17:17:47 UTC; 55min ago
       Docs: man:chronyd(8)
             man:chrony.conf(5)
   Main PID: 58579 (chronyd)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 1.0M (peak: 1.9M)
        CPU: 71ms
     CGroup: /system.slice/chronyd.service
             └─58579 /usr/sbin/chronyd -F 2

Feb 02 17:17:47 compute-0 systemd[1]: Starting NTP client/server...
Feb 02 17:17:47 compute-0 chronyd[58579]: chronyd version 4.8 starting (+CMDMON +REFCLOCK +RTC +PRIVDROP +SCFILTER +SIGND +NTS +SECHASH +IPV6 +DEBUG)
Feb 02 17:17:47 compute-0 chronyd[58579]: Frequency -28.210 +/- 0.279 ppm read from /var/lib/chrony/drift
Feb 02 17:17:47 compute-0 chronyd[58579]: Loaded seccomp filter (level 2)
Feb 02 17:17:47 compute-0 systemd[1]: Started NTP client/server.
Feb 02 17:19:57 compute-0 chronyd[58579]: Selected source 51.222.111.13 (pool.ntp.org)

● cloud-config.service - Cloud-init: Config Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 16:46:07 UTC; 1h 27min ago
   Main PID: 1003 (code=exited, status=0/SUCCESS)
        CPU: 401ms

Feb 02 16:46:07 np0005605476.novalocal systemd[1]: Starting Cloud-init: Config Stage...
Feb 02 16:46:07 np0005605476.novalocal cloud-init[1141]: Cloud-init v. 24.4-8.el9 running 'modules:config' at Mon, 02 Feb 2026 16:46:07 +0000. Up 9.02 seconds.
Feb 02 16:46:07 np0005605476.novalocal systemd[1]: Finished Cloud-init: Config Stage.

● cloud-final.service - Cloud-init: Final Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-final.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 16:46:08 UTC; 1h 27min ago
   Main PID: 1218 (code=exited, status=0/SUCCESS)
        CPU: 434ms

Feb 02 16:46:07 np0005605476.novalocal systemd[1]: Starting Cloud-init: Final Stage...
Feb 02 16:46:08 np0005605476.novalocal cloud-init[1328]: Cloud-init v. 24.4-8.el9 running 'modules:final' at Mon, 02 Feb 2026 16:46:08 +0000. Up 9.44 seconds.
Feb 02 16:46:08 np0005605476.novalocal cloud-init[1340]: -----BEGIN SSH HOST KEY FINGERPRINTS-----
Feb 02 16:46:08 np0005605476.novalocal cloud-init[1342]: 256 SHA256:N+FauEdItZPEKBlEfZFmQj9cKjHhFdMIWwZlpTfA7hc root@np0005605476.novalocal (ECDSA)
Feb 02 16:46:08 np0005605476.novalocal cloud-init[1344]: 256 SHA256:BjgVQizvlJkOSiPxwwyA9lp8VNT7m+4taF5VMhH9Lt8 root@np0005605476.novalocal (ED25519)
Feb 02 16:46:08 np0005605476.novalocal cloud-init[1346]: 3072 SHA256:dYVtDWrgLt5HdI+w+xjQ5VVxZf5AVpaNBr9c+g821ew root@np0005605476.novalocal (RSA)
Feb 02 16:46:08 np0005605476.novalocal cloud-init[1349]: -----END SSH HOST KEY FINGERPRINTS-----
Feb 02 16:46:08 np0005605476.novalocal cloud-init[1351]: #############################################################
Feb 02 16:46:08 np0005605476.novalocal cloud-init[1328]: Cloud-init v. 24.4-8.el9 finished at Mon, 02 Feb 2026 16:46:08 +0000. Datasource DataSourceConfigDrive [net,ver=2][source=/dev/sr0].  Up 9.60 seconds
Feb 02 16:46:08 np0005605476.novalocal systemd[1]: Finished Cloud-init: Final Stage.

● cloud-init-local.service - Cloud-init: Local Stage (pre-network)
     Loaded: loaded (/usr/lib/systemd/system/cloud-init-local.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 16:46:05 UTC; 1h 27min ago
   Main PID: 779 (code=exited, status=0/SUCCESS)
        CPU: 715ms

Feb 02 16:46:03 localhost systemd[1]: Starting Cloud-init: Local Stage (pre-network)...
Feb 02 16:46:04 localhost cloud-init[841]: Cloud-init v. 24.4-8.el9 running 'init-local' at Mon, 02 Feb 2026 16:46:04 +0000. Up 6.16 seconds.
Feb 02 16:46:05 np0005605476.novalocal systemd[1]: Finished Cloud-init: Local Stage (pre-network).

● cloud-init.service - Cloud-init: Network Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 16:46:07 UTC; 1h 27min ago
   Main PID: 888 (code=exited, status=0/SUCCESS)
        CPU: 1.378s

Feb 02 16:46:07 np0005605476.novalocal cloud-init[923]: |+.. .=   .    .. |
Feb 02 16:46:07 np0005605476.novalocal cloud-init[923]: |.*oo++.   .  o ..|
Feb 02 16:46:07 np0005605476.novalocal cloud-init[923]: |.o*+*o . .    + .|
Feb 02 16:46:07 np0005605476.novalocal cloud-init[923]: |o.+*.   S .  . . |
Feb 02 16:46:07 np0005605476.novalocal cloud-init[923]: |..  o  .   .. . .|
Feb 02 16:46:07 np0005605476.novalocal cloud-init[923]: |          ..o  o.|
Feb 02 16:46:07 np0005605476.novalocal cloud-init[923]: |         o.+.   E|
Feb 02 16:46:07 np0005605476.novalocal cloud-init[923]: |        o.oo..   |
Feb 02 16:46:07 np0005605476.novalocal cloud-init[923]: +----[SHA256]-----+
Feb 02 16:46:07 np0005605476.novalocal systemd[1]: Finished Cloud-init: Network Stage.

○ cpupower.service - Configure CPU power related settings
Unit display-manager.service could not be found.
     Loaded: loaded (/usr/lib/systemd/system/cpupower.service; disabled; preset: disabled)
     Active: inactive (dead)

● crond.service - Command Scheduler
     Loaded: loaded (/usr/lib/systemd/system/crond.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 16:46:07 UTC; 1h 27min ago
   Main PID: 1010 (crond)
         IO: 168.0K read, 8.0K written
      Tasks: 2 (limit: 48560)
     Memory: 1.5M (peak: 4.7M)
        CPU: 139ms
     CGroup: /system.slice/crond.service
             ├─ 1010 /usr/sbin/crond -n
             └─29996 /usr/sbin/anacron -s

Feb 02 17:01:01 compute-0 anacron[29996]: Will run job `cron.monthly' in 81 min.
Feb 02 17:01:01 compute-0 anacron[29996]: Jobs will be executed sequentially
Feb 02 17:01:01 compute-0 run-parts[29998]: (/etc/cron.hourly) finished 0anacron
Feb 02 17:01:01 compute-0 CROND[29984]: (root) CMDEND (run-parts /etc/cron.hourly)
Feb 02 17:42:01 compute-0 anacron[29996]: Job `cron.daily' started
Feb 02 17:42:01 compute-0 anacron[29996]: Job `cron.daily' terminated
Feb 02 18:01:01 compute-0 CROND[272190]: (root) CMD (run-parts /etc/cron.hourly)
Feb 02 18:01:01 compute-0 CROND[272189]: (root) CMDEND (run-parts /etc/cron.hourly)
Feb 02 18:02:01 compute-0 anacron[29996]: Job `cron.weekly' started
Feb 02 18:02:01 compute-0 anacron[29996]: Job `cron.weekly' terminated

● dbus-broker.service - D-Bus System Message Bus
     Loaded: loaded (/usr/lib/systemd/system/dbus-broker.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
TriggeredBy: ● dbus.socket
       Docs: man:dbus-broker-launch(1)
   Main PID: 770 (dbus-broker-lau)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48560)
     Memory: 2.9M (peak: 3.7M)
        CPU: 6.209s
     CGroup: /system.slice/dbus-broker.service
             ├─770 /usr/bin/dbus-broker-launch --scope system --audit
             └─777 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit

Feb 02 17:15:39 compute-0 dbus-broker-launch[770]: Noticed file-system modification, trigger reload.
Feb 02 17:15:39 compute-0 dbus-broker-launch[770]: Noticed file-system modification, trigger reload.
Feb 02 17:16:18 compute-0 dbus-broker-launch[777]: avc:  op=load_policy lsm=selinux seqno=7 res=1
Feb 02 17:16:25 compute-0 dbus-broker-launch[777]: avc:  op=load_policy lsm=selinux seqno=8 res=1
Feb 02 17:28:57 compute-0 dbus-broker-launch[777]: avc:  op=load_policy lsm=selinux seqno=9 res=1
Feb 02 17:32:16 compute-0 dbus-broker-launch[777]: avc:  op=load_policy lsm=selinux seqno=11 res=1
Feb 02 17:32:57 compute-0 dbus-broker-launch[770]: Noticed file-system modification, trigger reload.
Feb 02 17:32:57 compute-0 dbus-broker-launch[777]: avc:  op=load_policy lsm=selinux seqno=12 res=1
Feb 02 17:32:57 compute-0 dbus-broker-launch[770]: Noticed file-system modification, trigger reload.
Feb 02 17:34:15 compute-0 dbus-broker-launch[777]: avc:  op=load_policy lsm=selinux seqno=13 res=1

○ dm-event.service - Device-mapper event daemon
     Loaded: loaded (/usr/lib/systemd/system/dm-event.service; static)
     Active: inactive (dead)
TriggeredBy: ● dm-event.socket
       Docs: man:dmeventd(8)

○ dnf-makecache.service - dnf makecache
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.service; static)
     Active: inactive (dead) since Mon 2026-02-02 17:15:51 UTC; 57min ago
TriggeredBy: ● dnf-makecache.timer
   Main PID: 44169 (code=exited, status=0/SUCCESS)
        CPU: 1.794s

Feb 02 17:15:50 compute-0 dnf[44169]: NFV SIG OpenvSwitch                              83 kB/s | 3.0 kB     00:00
Feb 02 17:15:50 compute-0 dnf[44169]: repo-setup-centos-appstream                      97 kB/s | 4.4 kB     00:00
Feb 02 17:15:50 compute-0 dnf[44169]: repo-setup-centos-baseos                         76 kB/s | 3.9 kB     00:00
Feb 02 17:15:50 compute-0 dnf[44169]: repo-setup-centos-highavailability              144 kB/s | 3.9 kB     00:00
Feb 02 17:15:50 compute-0 dnf[44169]: repo-setup-centos-powertools                    171 kB/s | 4.3 kB     00:00
Feb 02 17:15:51 compute-0 dnf[44169]: Extra Packages for Enterprise Linux 9 - x86_64  102 kB/s |  31 kB     00:00
Feb 02 17:15:51 compute-0 dnf[44169]: Metadata cache created.
Feb 02 17:15:51 compute-0 systemd[1]: dnf-makecache.service: Deactivated successfully.
Feb 02 17:15:51 compute-0 systemd[1]: Finished dnf makecache.
Feb 02 17:15:51 compute-0 systemd[1]: dnf-makecache.service: Consumed 1.794s CPU time.

○ dracut-cmdline.service - dracut cmdline hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-cmdline.service; static)
     Active: inactive (dead) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
   Duration: 1.699s
       Docs: man:dracut-cmdline.service(8)
             man:dracut.bootup(7)
   Main PID: 324 (code=exited, status=0/SUCCESS)
        CPU: 110ms

Feb 02 16:46:00 localhost systemd[1]: Starting dracut cmdline hook...
Feb 02 16:46:00 localhost dracut-cmdline[324]: dracut-9 dracut-057-102.git20250818.el9
Feb 02 16:46:00 localhost dracut-cmdline[324]: Using kernel command line parameters:    BOOT_IMAGE=(hd0,msdos1)/boot/vmlinuz-5.14.0-665.el9.x86_64 root=UUID=822f14ea-6e7e-41df-b0d8-fbe282d9ded8 ro console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=1G-2G:192M,2G-64G:256M,64G-:512M
Feb 02 16:46:00 localhost systemd[1]: Finished dracut cmdline hook.
Feb 02 16:46:02 localhost systemd[1]: dracut-cmdline.service: Deactivated successfully.
Feb 02 16:46:02 localhost systemd[1]: Stopped dracut cmdline hook.

○ dracut-initqueue.service - dracut initqueue hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-initqueue.service; static)
     Active: inactive (dead) since Mon 2026-02-02 16:46:01 UTC; 1h 27min ago
   Duration: 796ms
       Docs: man:dracut-initqueue.service(8)
             man:dracut.bootup(7)
   Main PID: 491 (code=exited, status=0/SUCCESS)
        CPU: 25ms

Feb 02 16:46:00 localhost systemd[1]: Starting dracut initqueue hook...
Feb 02 16:46:01 localhost systemd[1]: Finished dracut initqueue hook.
Feb 02 16:46:01 localhost systemd[1]: dracut-initqueue.service: Deactivated successfully.
Feb 02 16:46:01 localhost systemd[1]: Stopped dracut initqueue hook.

○ dracut-mount.service - dracut mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-mount.service; static)
     Active: inactive (dead) since Mon 2026-02-02 16:46:01 UTC; 1h 27min ago
   Duration: 155ms
       Docs: man:dracut-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 570 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Feb 02 16:46:01 localhost systemd[1]: Starting dracut mount hook...
Feb 02 16:46:01 localhost systemd[1]: Finished dracut mount hook.
Feb 02 16:46:01 localhost systemd[1]: dracut-mount.service: Deactivated successfully.
Feb 02 16:46:01 localhost systemd[1]: Stopped dracut mount hook.

○ dracut-pre-mount.service - dracut pre-mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-mount.service; static)
     Active: inactive (dead) since Mon 2026-02-02 16:46:01 UTC; 1h 27min ago
   Duration: 750ms
       Docs: man:dracut-pre-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 547 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Feb 02 16:46:01 localhost systemd[1]: Starting dracut pre-mount hook...
Feb 02 16:46:01 localhost systemd[1]: Finished dracut pre-mount hook.
Feb 02 16:46:01 localhost systemd[1]: dracut-pre-mount.service: Deactivated successfully.
Feb 02 16:46:01 localhost systemd[1]: Stopped dracut pre-mount hook.

○ dracut-pre-pivot.service - dracut pre-pivot and cleanup hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-pivot.service; static)
     Active: inactive (dead) since Mon 2026-02-02 16:46:01 UTC; 1h 27min ago
   Duration: 42ms
       Docs: man:dracut-pre-pivot.service(8)
             man:dracut.bootup(7)
   Main PID: 577 (code=exited, status=0/SUCCESS)
        CPU: 78ms

Feb 02 16:46:01 localhost systemd[1]: Starting dracut pre-pivot and cleanup hook...
Feb 02 16:46:01 localhost systemd[1]: Finished dracut pre-pivot and cleanup hook.
Feb 02 16:46:01 localhost systemd[1]: dracut-pre-pivot.service: Deactivated successfully.
Feb 02 16:46:01 localhost systemd[1]: Stopped dracut pre-pivot and cleanup hook.

○ dracut-pre-trigger.service - dracut pre-trigger hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-trigger.service; static)
     Active: inactive (dead) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
   Duration: 1.255s
       Docs: man:dracut-pre-trigger.service(8)
             man:dracut.bootup(7)
   Main PID: 464 (code=exited, status=0/SUCCESS)
        CPU: 18ms

Feb 02 16:46:00 localhost systemd[1]: Starting dracut pre-trigger hook...
Feb 02 16:46:00 localhost systemd[1]: Finished dracut pre-trigger hook.
Feb 02 16:46:02 localhost systemd[1]: dracut-pre-trigger.service: Deactivated successfully.
Feb 02 16:46:02 localhost systemd[1]: Stopped dracut pre-trigger hook.

○ dracut-pre-udev.service - dracut pre-udev hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-udev.service; static)
     Active: inactive (dead) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
   Duration: 1.388s
       Docs: man:dracut-pre-udev.service(8)
             man:dracut.bootup(7)
   Main PID: 413 (code=exited, status=0/SUCCESS)
        CPU: 301ms

Feb 02 16:46:00 localhost systemd[1]: Starting dracut pre-udev hook...
Feb 02 16:46:00 localhost rpc.statd[441]: Version 2.5.4 starting
Feb 02 16:46:00 localhost rpc.statd[441]: Initializing NSM state
Feb 02 16:46:00 localhost rpc.idmapd[446]: Setting log level to 0
Feb 02 16:46:00 localhost systemd[1]: Finished dracut pre-udev hook.
Feb 02 16:46:01 localhost rpc.idmapd[446]: exiting on signal 15
Feb 02 16:46:02 localhost systemd[1]: dracut-pre-udev.service: Deactivated successfully.
Feb 02 16:46:02 localhost systemd[1]: Stopped dracut pre-udev hook.

○ dracut-shutdown-onfailure.service - Service executing upon dracut-shutdown failure to perform cleanup
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown-onfailure.service; static)
     Active: inactive (dead)
       Docs: man:dracut-shutdown.service(8)

● dracut-shutdown.service - Restore /run/initramfs on shutdown
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown.service; static)
     Active: active (exited) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
       Docs: man:dracut-shutdown.service(8)
   Main PID: 781 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Feb 02 16:46:03 localhost systemd[1]: Starting Restore /run/initramfs on shutdown...
Feb 02 16:46:03 localhost systemd[1]: Finished Restore /run/initramfs on shutdown.

● edpm-container-shutdown.service - EDPM Container Shutdown
     Loaded: loaded (/etc/systemd/system/edpm-container-shutdown.service; enabled; preset: enabled)
     Active: active (exited) since Mon 2026-02-02 17:18:12 UTC; 55min ago
       Docs: https://github.com/openstack-k8s-operators/docs
   Main PID: 61572 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Feb 02 17:18:12 compute-0 systemd[1]: Starting EDPM Container Shutdown...
Feb 02 17:18:12 compute-0 systemd[1]: Finished EDPM Container Shutdown.

○ edpm_libvirt_guests.service - Suspend libvirt Guests in edpm
     Loaded: loaded (/etc/systemd/system/edpm_libvirt_guests.service; enabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:libvirtd(8)
             https://libvirt.org

● edpm_nova_compute.service - nova_compute container
     Loaded: loaded (/etc/systemd/system/edpm_nova_compute.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 17:37:21 UTC; 36min ago
    Process: 239816 ExecStart=/usr/bin/podman start nova_compute (code=exited, status=0/SUCCESS)
   Main PID: 239846 (conmon)
         IO: 0B read, 84.5K written
      Tasks: 1 (limit: 48560)
     Memory: 680.0K (peak: 16.7M)
        CPU: 1.030s
     CGroup: /system.slice/edpm_nova_compute.service
             └─239846 /usr/bin/conmon --api-version 1 -c 17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681 -u 17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681/userdata -p /run/containers/storage/overlay-containers/17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681

Feb 02 18:13:06 compute-0 nova_compute[239846]: 2026-02-02 18:13:06.046 239853 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 39 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 18:13:06 compute-0 nova_compute[239846]: 2026-02-02 18:13:06.242 239853 DEBUG oslo_service.periodic_task [None req-2829bb05-3ca0-460a-873d-6129b7c9c50b - - - - - -] Running periodic task ComputeManager._cleanup_incomplete_migrations run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Feb 02 18:13:06 compute-0 nova_compute[239846]: 2026-02-02 18:13:06.242 239853 DEBUG nova.compute.manager [None req-2829bb05-3ca0-460a-873d-6129b7c9c50b - - - - - -] Cleaning up deleted instances with incomplete migration  _cleanup_incomplete_migrations /usr/lib/python3.9/site-packages/nova/compute/manager.py:11183[00m
Feb 02 18:13:11 compute-0 nova_compute[239846]: 2026-02-02 18:13:11.046 239853 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 39 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 18:13:14 compute-0 nova_compute[239846]: 2026-02-02 18:13:14.278 239853 DEBUG oslo_service.periodic_task [None req-2829bb05-3ca0-460a-873d-6129b7c9c50b - - - - - -] Running periodic task ComputeManager._run_pending_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Feb 02 18:13:14 compute-0 nova_compute[239846]: 2026-02-02 18:13:14.279 239853 DEBUG nova.compute.manager [None req-2829bb05-3ca0-460a-873d-6129b7c9c50b - - - - - -] Cleaning up deleted instances _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11145[00m
Feb 02 18:13:14 compute-0 nova_compute[239846]: 2026-02-02 18:13:14.305 239853 DEBUG nova.compute.manager [None req-2829bb05-3ca0-460a-873d-6129b7c9c50b - - - - - -] There are 0 instances to clean _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11154[00m
Feb 02 18:13:16 compute-0 nova_compute[239846]: 2026-02-02 18:13:16.048 239853 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 39 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 18:13:16 compute-0 nova_compute[239846]: 2026-02-02 18:13:16.050 239853 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 39 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 18:13:21 compute-0 nova_compute[239846]: 2026-02-02 18:13:21.050 239853 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 39 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m

● edpm_ovn_controller.service - ovn_controller container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_controller.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 17:29:47 UTC; 43min ago
   Main PID: 146041 (conmon)
         IO: 0B read, 140.0K written
      Tasks: 1 (limit: 48560)
     Memory: 696.0K (peak: 18.2M)
        CPU: 211ms
     CGroup: /system.slice/edpm_ovn_controller.service
             └─146041 /usr/bin/conmon --api-version 1 -c 70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783 -u 70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783/userdata -p /run/containers/storage/overlay-containers/70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783

Feb 02 18:00:48 compute-0 ovn_controller[146041]: 2026-02-02T18:00:48Z|00273|binding|INFO|Setting lport 75586f61-07ff-4cd0-9aa1-9845359a1fe6 up in Southbound
Feb 02 18:00:48 compute-0 ovn_controller[146041]: 2026-02-02T18:00:48Z|00274|binding|INFO|Releasing lport 28b2a3c2-071b-4b34-9bd5-287eeeabc012 from this chassis (sb_readonly=0)
Feb 02 18:00:52 compute-0 ovn_controller[146041]: 2026-02-02T18:00:52Z|00275|binding|INFO|Releasing lport 28b2a3c2-071b-4b34-9bd5-287eeeabc012 from this chassis (sb_readonly=0)
Feb 02 18:01:01 compute-0 ovn_controller[146041]: 2026-02-02T18:01:01Z|00067|pinctrl(ovn_pinctrl0)|INFO|DHCPOFFER fa:16:3e:30:d8:b7 10.100.0.7
Feb 02 18:01:01 compute-0 ovn_controller[146041]: 2026-02-02T18:01:01Z|00068|pinctrl(ovn_pinctrl0)|INFO|DHCPACK fa:16:3e:30:d8:b7 10.100.0.7
Feb 02 18:01:22 compute-0 ovn_controller[146041]: 2026-02-02T18:01:22Z|00276|memory_trim|INFO|Detected inactivity (last active 30015 ms ago): trimming memory
Feb 02 18:01:55 compute-0 ovn_controller[146041]: 2026-02-02T18:01:55Z|00277|binding|INFO|Releasing lport 75586f61-07ff-4cd0-9aa1-9845359a1fe6 from this chassis (sb_readonly=0)
Feb 02 18:01:55 compute-0 ovn_controller[146041]: 2026-02-02T18:01:55Z|00278|binding|INFO|Setting lport 75586f61-07ff-4cd0-9aa1-9845359a1fe6 down in Southbound
Feb 02 18:01:55 compute-0 ovn_controller[146041]: 2026-02-02T18:01:55Z|00279|binding|INFO|Removing iface tap75586f61-07 ovn-installed in OVS
Feb 02 18:02:35 compute-0 ovn_controller[146041]: 2026-02-02T18:02:35Z|00280|memory_trim|INFO|Detected inactivity (last active 30001 ms ago): trimming memory

● edpm_ovn_metadata_agent.service - ovn_metadata_agent container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_metadata_agent.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 17:30:45 UTC; 42min ago
   Main PID: 155386 (conmon)
         IO: 48.0K read, 113.5K written
      Tasks: 1 (limit: 48560)
     Memory: 772.0K (peak: 18.6M)
        CPU: 382ms
     CGroup: /system.slice/edpm_ovn_metadata_agent.service
             └─155386 /usr/bin/conmon --api-version 1 -c 983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b -u 983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b/userdata -p /run/containers/storage/overlay-containers/983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b

Feb 02 18:09:46 compute-0 ovn_metadata_agent[155386]: 2026-02-02 18:09:46.661 155391 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Feb 02 18:10:46 compute-0 ovn_metadata_agent[155386]: 2026-02-02 18:10:46.662 155391 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Feb 02 18:10:46 compute-0 ovn_metadata_agent[155386]: 2026-02-02 18:10:46.663 155391 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Feb 02 18:10:46 compute-0 ovn_metadata_agent[155386]: 2026-02-02 18:10:46.663 155391 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Unit fcoe.service could not be found.
Unit hv_kvp_daemon.service could not be found.
Feb 02 18:11:46 compute-0 ovn_metadata_agent[155386]: 2026-02-02 18:11:46.664 155391 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Feb 02 18:11:46 compute-0 ovn_metadata_agent[155386]: 2026-02-02 18:11:46.665 155391 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Feb 02 18:11:46 compute-0 ovn_metadata_agent[155386]: 2026-02-02 18:11:46.665 155391 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Feb 02 18:12:46 compute-0 ovn_metadata_agent[155386]: 2026-02-02 18:12:46.666 155391 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Feb 02 18:12:46 compute-0 ovn_metadata_agent[155386]: 2026-02-02 18:12:46.666 155391 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Feb 02 18:12:46 compute-0 ovn_metadata_agent[155386]: 2026-02-02 18:12:46.666 155391 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m

○ emergency.service - Emergency Shell
     Loaded: loaded (/usr/lib/systemd/system/emergency.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

● getty@tty1.service - Getty on tty1
     Loaded: loaded (/usr/lib/systemd/system/getty@.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 16:46:07 UTC; 1h 27min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1013 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 212.0K (peak: 432.0K)
        CPU: 8ms
     CGroup: /system.slice/system-getty.slice/getty@tty1.service
             └─1013 /sbin/agetty -o "-p -- \\u" --noclear - linux

Feb 02 16:46:07 np0005605476.novalocal systemd[1]: Started Getty on tty1.

● gssproxy.service - GSSAPI Proxy Daemon
     Loaded: loaded (/usr/lib/systemd/system/gssproxy.service; disabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 16:46:05 UTC; 1h 27min ago
   Main PID: 873 (gssproxy)
         IO: 0B read, 0B written
      Tasks: 6 (limit: 48560)
     Memory: 1.8M (peak: 3.4M)
        CPU: 15ms
     CGroup: /system.slice/gssproxy.service
             └─873 /usr/sbin/gssproxy -D

Feb 02 16:46:05 np0005605476.novalocal systemd[1]: Starting GSSAPI Proxy Daemon...
Feb 02 16:46:05 np0005605476.novalocal systemd[1]: Started GSSAPI Proxy Daemon.

○ import-state.service - Import network configuration from initramfs
     Loaded: loaded (/usr/lib/systemd/system/import-state.service; enabled; preset: enabled)
     Active: inactive (dead)

○ initrd-cleanup.service - Cleaning Up and Shutting Down Daemons
     Loaded: loaded (/usr/lib/systemd/system/initrd-cleanup.service; static)
     Active: inactive (dead) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
   Main PID: 618 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Feb 02 16:46:01 localhost systemd[1]: Starting Cleaning Up and Shutting Down Daemons...
Feb 02 16:46:02 localhost systemd[1]: initrd-cleanup.service: Deactivated successfully.
Feb 02 16:46:02 localhost systemd[1]: Finished Cleaning Up and Shutting Down Daemons.

○ initrd-parse-etc.service - Mountpoints Configured in the Real Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-parse-etc.service; static)
     Active: inactive (dead) since Mon 2026-02-02 16:46:01 UTC; 1h 27min ago
   Main PID: 569 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Feb 02 16:46:01 localhost systemd[1]: Starting Mountpoints Configured in the Real Root...
Feb 02 16:46:01 localhost systemd[1]: initrd-parse-etc.service: Deactivated successfully.
Feb 02 16:46:01 localhost systemd[1]: Finished Mountpoints Configured in the Real Root.

○ initrd-switch-root.service - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.service; static)
     Active: inactive (dead) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
   Main PID: 623 (code=exited, status=0/SUCCESS)
        CPU: 4ms

Feb 02 16:46:02 localhost systemd[1]: Starting Switch Root...

○ initrd-udevadm-cleanup-db.service - Cleanup udev Database
     Loaded: loaded (/usr/lib/systemd/system/initrd-udevadm-cleanup-db.service; static)
     Active: inactive (dead) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
   Main PID: 621 (code=exited, status=0/SUCCESS)
        CPU: 6ms

Feb 02 16:46:02 localhost systemd[1]: Starting Cleanup udev Database...
Feb 02 16:46:02 localhost systemd[1]: initrd-udevadm-cleanup-db.service: Deactivated successfully.
Feb 02 16:46:02 localhost systemd[1]: Finished Cleanup udev Database.

○ ip6tables.service - IPv6 firewall with ip6tables
     Loaded: loaded (/usr/lib/systemd/system/ip6tables.service; disabled; preset: disabled)
     Active: inactive (dead)

○ iptables.service - IPv4 firewall with iptables
     Loaded: loaded (/usr/lib/systemd/system/iptables.service; disabled; preset: disabled)
     Active: inactive (dead) since Mon 2026-02-02 17:18:19 UTC; 55min ago
   Duration: 32min 15.559s
   Main PID: 791 (code=exited, status=0/SUCCESS)
        CPU: 97ms

Feb 02 16:46:03 localhost systemd[1]: Starting IPv4 firewall with iptables...
Feb 02 16:46:03 localhost iptables.init[791]: iptables: Applying firewall rules: [  OK  ]
Feb 02 16:46:03 localhost systemd[1]: Finished IPv4 firewall with iptables.
Feb 02 17:18:19 compute-0 systemd[1]: Stopping IPv4 firewall with iptables...
Feb 02 17:18:19 compute-0 iptables.init[62820]: iptables: Setting chains to policy ACCEPT: raw mangle filter nat [  OK  ]
Feb 02 17:18:19 compute-0 iptables.init[62820]: iptables: Flushing firewall rules: [  OK  ]
Feb 02 17:18:19 compute-0 systemd[1]: iptables.service: Deactivated successfully.
Feb 02 17:18:19 compute-0 systemd[1]: Stopped IPv4 firewall with iptables.

● irqbalance.service - irqbalance daemon
     Loaded: loaded (/usr/lib/systemd/system/irqbalance.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
       Docs: man:irqbalance(1)
             https://github.com/Irqbalance/irqbalance
   Main PID: 795 (irqbalance)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48560)
     Memory: 1.1M (peak: 1.5M)
        CPU: 345ms
     CGroup: /system.slice/irqbalance.service
             └─795 /usr/sbin/irqbalance

Feb 02 16:46:13 np0005605476.novalocal irqbalance[795]: Cannot change IRQ 32 affinity: Operation not permitted
Feb 02 16:46:13 np0005605476.novalocal irqbalance[795]: IRQ 32 affinity is now unmanaged
Feb 02 16:46:13 np0005605476.novalocal irqbalance[795]: Cannot change IRQ 30 affinity: Operation not permitted
Feb 02 16:46:13 np0005605476.novalocal irqbalance[795]: IRQ 30 affinity is now unmanaged
Feb 02 16:46:13 np0005605476.novalocal irqbalance[795]: Cannot change IRQ 29 affinity: Operation not permitted
Feb 02 16:46:13 np0005605476.novalocal irqbalance[795]: IRQ 29 affinity is now unmanaged
Feb 02 16:57:13 np0005605476.novalocal irqbalance[795]: Cannot change IRQ 27 affinity: Operation not permitted
Feb 02 16:57:13 np0005605476.novalocal irqbalance[795]: IRQ 27 affinity is now unmanaged
Feb 02 17:16:13 compute-0 irqbalance[795]: Cannot change IRQ 26 affinity: Operation not permitted
Feb 02 17:16:13 compute-0 irqbalance[795]: IRQ 26 affinity is now unmanaged

○ iscsi-init.service - One time configuration for iscsi.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-init.service; disabled; preset: disabled)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 17:35:56 UTC; 37min ago

Feb 02 17:35:23 compute-0 systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).
Feb 02 17:35:56 compute-0 systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).

○ iscsi-onboot.service - Special handling of early boot iSCSI sessions
     Loaded: loaded (/usr/lib/systemd/system/iscsi-onboot.service; enabled; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsi-shutdown.service - Logout off all iSCSI sessions on shutdown
     Loaded: loaded (/usr/lib/systemd/system/iscsi-shutdown.service; static)
     Active: active (exited) since Mon 2026-02-02 17:35:23 UTC; 38min ago
       Docs: man:iscsid(8)
             man:iscsiadm(8)
   Main PID: 217895 (code=exited, status=0/SUCCESS)
        CPU: 2ms

Feb 02 17:35:23 compute-0 systemd[1]: Starting Logout off all iSCSI sessions on shutdown...
Feb 02 17:35:23 compute-0 systemd[1]: Finished Logout off all iSCSI sessions on shutdown.

○ iscsi-starter.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-starter.service; enabled; preset: disabled)
     Active: inactive (dead)

○ iscsi.service - Login and scanning of iSCSI devices
     Loaded: loaded (/usr/lib/systemd/system/iscsi.service; indirect; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsid.service - Open-iSCSI
     Loaded: loaded (/usr/lib/systemd/system/iscsid.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 17:35:56 UTC; 37min ago
TriggeredBy: ● iscsid.socket
       Docs: man:iscsid(8)
             man:iscsiuio(8)
             man:iscsiadm(8)
   Main PID: 224391 (iscsid)
     Status: "Ready to process requests"
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 1.9M (peak: 2.0M)
        CPU: 6ms
     CGroup: /system.slice/iscsid.service
             └─224391 /usr/sbin/iscsid -f

Feb 02 17:35:56 compute-0 systemd[1]: Starting Open-iSCSI...
Feb 02 17:35:56 compute-0 systemd[1]: Started Open-iSCSI.

○ iscsiuio.service - iSCSI UserSpace I/O driver
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ iscsiuio.socket
       Docs: man:iscsiuio(8)

● kdump.service - Crash recovery kernel arming
     Loaded: loaded (/usr/lib/systemd/system/kdump.service; enabled; preset: enabled)
     Active: active (exited) since Mon 2026-02-02 16:46:17 UTC; 1h 27min ago
   Main PID: 1009 (code=exited, status=0/SUCCESS)
        CPU: 14.973s

Feb 02 16:46:16 np0005605476.novalocal dracut[1269]: Linked:         0 files
Feb 02 16:46:16 np0005605476.novalocal dracut[1269]: Compared:       0 xattrs
Feb 02 16:46:16 np0005605476.novalocal dracut[1269]: Compared:       0 files
Feb 02 16:46:16 np0005605476.novalocal dracut[1269]: Saved:          0 B
Feb 02 16:46:16 np0005605476.novalocal dracut[1269]: Duration:       0.000342 seconds
Feb 02 16:46:16 np0005605476.novalocal dracut[1269]: *** Hardlinking files done ***
Feb 02 16:46:17 np0005605476.novalocal dracut[1269]: *** Creating initramfs image file '/boot/initramfs-5.14.0-665.el9.x86_64kdump.img' done ***
Feb 02 16:46:17 np0005605476.novalocal kdumpctl[1019]: kdump: kexec: loaded kdump kernel
Feb 02 16:46:17 np0005605476.novalocal kdumpctl[1019]: kdump: Starting kdump: [OK]
Feb 02 16:46:17 np0005605476.novalocal systemd[1]: Finished Crash recovery kernel arming.
Unit lvm2-activation-early.service could not be found.

● kmod-static-nodes.service - Create List of Static Device Nodes
     Loaded: loaded (/usr/lib/systemd/system/kmod-static-nodes.service; static)
     Active: active (exited) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
   Main PID: 673 (code=exited, status=0/SUCCESS)
        CPU: 6ms

Feb 02 16:46:02 localhost systemd[1]: Finished Create List of Static Device Nodes.

● ldconfig.service - Rebuild Dynamic Linker Cache
     Loaded: loaded (/usr/lib/systemd/system/ldconfig.service; static)
     Active: active (exited) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
       Docs: man:ldconfig(8)
   Main PID: 697 (code=exited, status=0/SUCCESS)
        CPU: 41ms

Feb 02 16:46:03 localhost systemd[1]: Starting Rebuild Dynamic Linker Cache...
Feb 02 16:46:03 localhost systemd[1]: Finished Rebuild Dynamic Linker Cache.

○ libvirtd.service
     Loaded: masked (Reason: Unit libvirtd.service is masked.)
     Active: inactive (dead)
TriggeredBy: ○ libvirtd-admin.socket
             ○ libvirtd-ro.socket
             ○ libvirtd.socket

○ loadmodules.service - Load legacy module configuration
     Loaded: loaded (/usr/lib/systemd/system/loadmodules.service; enabled; preset: enabled)
     Active: inactive (dead)

○ logrotate.service - Rotate log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.service; static)
     Active: inactive (dead)
TriggeredBy: ● logrotate.timer
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

○ lvm2-lvmpolld.service - LVM2 poll daemon
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.service; static)
     Active: inactive (dead)
TriggeredBy: ● lvm2-lvmpolld.socket
       Docs: man:lvmpolld(8)

● lvm2-monitor.service - Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling
     Loaded: loaded (/usr/lib/systemd/system/lvm2-monitor.service; enabled; preset: enabled)
     Active: active (exited) since Mon 2026-02-02 17:14:16 UTC; 59min ago
       Docs: man:dmeventd(8)
             man:lvcreate(8)
             man:lvchange(8)
             man:vgchange(8)
   Main PID: 34048 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Feb 02 17:14:16 compute-0 systemd[1]: Starting Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling...
Feb 02 17:14:16 compute-0 systemd[1]: Finished Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling.

○ mdmonitor.service - Software RAID monitoring and management
     Loaded: loaded (/usr/lib/systemd/system/mdmonitor.service; enabled; preset: enabled)
     Active: inactive (dead)

○ microcode.service - Load CPU microcode update
     Loaded: loaded (/usr/lib/systemd/system/microcode.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 16:46:03 UTC; 1h 27min ago

Feb 02 16:46:03 localhost systemd[1]: Load CPU microcode update was skipped because of an unmet condition check (ConditionPathExists=/sys/devices/system/cpu/microcode/reload).

○ modprobe@configfs.service - Load Kernel Module configfs
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
       Docs: man:modprobe(8)
   Main PID: 774 (code=exited, status=0/SUCCESS)
        CPU: 5ms

Feb 02 16:46:03 localhost systemd[1]: Starting Load Kernel Module configfs...
Feb 02 16:46:03 localhost systemd[1]: modprobe@configfs.service: Deactivated successfully.
Feb 02 16:46:03 localhost systemd[1]: Finished Load Kernel Module configfs.

○ modprobe@drm.service - Load Kernel Module drm
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
       Docs: man:modprobe(8)
   Main PID: 675 (code=exited, status=0/SUCCESS)
        CPU: 85ms

Feb 02 16:46:02 localhost systemd[1]: modprobe@drm.service: Deactivated successfully.
Feb 02 16:46:02 localhost systemd[1]: Finished Load Kernel Module drm.

○ modprobe@efi_pstore.service - Load Kernel Module efi_pstore
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
       Docs: man:modprobe(8)
   Main PID: 676 (code=exited, status=0/SUCCESS)
        CPU: 4ms

Feb 02 16:46:02 localhost systemd[1]: modprobe@efi_pstore.service: Deactivated successfully.
Feb 02 16:46:02 localhost systemd[1]: Finished Load Kernel Module efi_pstore.

○ modprobe@fuse.service - Load Kernel Module fuse
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
       Docs: man:modprobe(8)
   Main PID: 677 (code=exited, status=0/SUCCESS)
        CPU: 43ms

Feb 02 16:46:02 localhost systemd[1]: modprobe@fuse.service: Deactivated successfully.
Feb 02 16:46:02 localhost systemd[1]: Finished Load Kernel Module fuse.

● multipathd.service - Device-Mapper Multipath Device Controller
     Loaded: loaded (/usr/lib/systemd/system/multipathd.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 17:35:57 UTC; 37min ago
TriggeredBy: ● multipathd.socket
   Main PID: 224550 (multipathd)
     Status: "up"
         IO: 0B read, 0B written
      Tasks: 7
     Memory: 18.4M (peak: 18.8M)
        CPU: 320ms
     CGroup: /system.slice/multipathd.service
             └─224550 /sbin/multipathd -d -s

Feb 02 17:35:57 compute-0 systemd[1]: Starting Device-Mapper Multipath Device Controller...
Feb 02 17:35:57 compute-0 multipathd[224550]: --------start up--------
Feb 02 17:35:57 compute-0 multipathd[224550]: read /etc/multipath.conf
Feb 02 17:35:57 compute-0 multipathd[224550]: path checkers start up
Feb 02 17:35:57 compute-0 systemd[1]: Started Device-Mapper Multipath Device Controller.

○ netns-placeholder.service - Create netns directory
     Loaded: loaded (/etc/systemd/system/netns-placeholder.service; enabled; preset: enabled)
     Active: inactive (dead) since Mon 2026-02-02 17:30:23 UTC; 43min ago
   Main PID: 152551 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Feb 02 17:30:23 compute-0 systemd[1]: Starting Create netns directory...
Feb 02 17:30:23 compute-0 systemd[1]: netns-placeholder.service: Deactivated successfully.
Feb 02 17:30:23 compute-0 systemd[1]: Finished Create netns directory.

○ network.service - LSB: Bring up/down networking
     Loaded: loaded (/etc/rc.d/init.d/network; generated)
     Active: inactive (dead)
       Docs: man:systemd-sysv-generator(8)

● NetworkManager-wait-online.service - Network Manager Wait Online
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager-wait-online.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 17:16:34 UTC; 56min ago
       Docs: man:NetworkManager-wait-online.service(8)
   Main PID: 49035 (code=exited, status=0/SUCCESS)
        CPU: 21ms

Feb 02 17:16:34 compute-0 systemd[1]: Starting Network Manager Wait Online...
Feb 02 17:16:34 compute-0 systemd[1]: Finished Network Manager Wait Online.

● NetworkManager.service - Network Manager
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager.service; enabled; preset: enabled)
    Drop-In: /usr/lib/systemd/system/NetworkManager.service.d
             └─NetworkManager-ovs.conf
     Active: active (running) since Mon 2026-02-02 17:16:34 UTC; 56min ago
       Docs: man:NetworkManager(8)
   Main PID: 49022 (NetworkManager)
         IO: 104.0K read, 271.0K written
      Tasks: 3 (limit: 48560)
     Memory: 5.6M (peak: 6.4M)
        CPU: 29.364s
     CGroup: /system.slice/NetworkManager.service
             └─49022 /usr/sbin/NetworkManager --no-daemon

Feb 02 18:00:48 compute-0 NetworkManager[49022]: <info>  [1770055248.0394] manager: (tap75586f61-07): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/140)
Feb 02 18:00:48 compute-0 NetworkManager[49022]: <info>  [1770055248.7215] manager: (tap75586f61-07): new Tun device (/org/freedesktop/NetworkManager/Devices/141)
Feb 02 18:00:48 compute-0 NetworkManager[49022]: <info>  [1770055248.7887] device (tap75586f61-07): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Unit ntpd.service could not be found.
Unit ntpdate.service could not be found.
Feb 02 18:00:48 compute-0 NetworkManager[49022]: <info>  [1770055248.7895] device (tap75586f61-07): state change: unavailable -> disconnected (reason 'none', managed-type: 'external')
Feb 02 18:00:48 compute-0 NetworkManager[49022]: <info>  [1770055248.8129] manager: (tap967fc097-50): new Veth device (/org/freedesktop/NetworkManager/Devices/142)
Feb 02 18:00:48 compute-0 NetworkManager[49022]: <info>  [1770055248.8548] device (tap967fc097-50): carrier: link connected
Feb 02 18:00:48 compute-0 NetworkManager[49022]: <info>  [1770055248.9860] manager: (tap967fc097-50): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/143)
Feb 02 18:00:52 compute-0 NetworkManager[49022]: <info>  [1770055252.5082] manager: (patch-provnet-84933c65-96ea-4900-b5b8-d0e3462e1415-to-br-int): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/144)
Feb 02 18:00:52 compute-0 NetworkManager[49022]: <info>  [1770055252.5087] manager: (patch-br-int-to-provnet-84933c65-96ea-4900-b5b8-d0e3462e1415): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/145)
Feb 02 18:01:55 compute-0 NetworkManager[49022]: <info>  [1770055315.4377] device (tap75586f61-07): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')

○ nfs-idmapd.service - NFSv4 ID-name mapping service
     Loaded: loaded (/usr/lib/systemd/system/nfs-idmapd.service; static)
     Active: inactive (dead)
       Docs: man:idmapd(8)

○ nfs-mountd.service - NFS Mount Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfs-mountd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.mountd(8)

○ nfs-server.service - NFS server and services
     Loaded: loaded (/usr/lib/systemd/system/nfs-server.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:rpc.nfsd(8)
             man:exportfs(8)

○ nfs-utils.service - NFS server and client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-utils.service; static)
     Active: inactive (dead)

○ nfsdcld.service - NFSv4 Client Tracking Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfsdcld.service; static)
     Active: inactive (dead)
       Docs: man:nfsdcld(8)

● nftables.service - Netfilter Tables
     Loaded: loaded (/usr/lib/systemd/system/nftables.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 17:18:21 UTC; 55min ago
       Docs: man:nft(8)
   Main PID: 63210 (code=exited, status=0/SUCCESS)
        CPU: 16ms

Feb 02 17:18:21 compute-0 systemd[1]: Starting Netfilter Tables...
Feb 02 17:18:21 compute-0 systemd[1]: Finished Netfilter Tables.

● nis-domainname.service - Read and set NIS domainname from /etc/sysconfig/network
     Loaded: loaded (/usr/lib/systemd/system/nis-domainname.service; enabled; preset: enabled)
     Active: active (exited) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
   Main PID: 678 (code=exited, status=0/SUCCESS)
        CPU: 5ms

Feb 02 16:46:02 localhost systemd[1]: Finished Read and set NIS domainname from /etc/sysconfig/network.

○ nvmefc-boot-connections.service - Auto-connect to subsystems on FC-NVME devices found during boot
     Loaded: loaded (/usr/lib/systemd/system/nvmefc-boot-connections.service; enabled; preset: enabled)
     Active: inactive (dead)

● openvswitch.service - Open vSwitch
     Loaded: loaded (/usr/lib/systemd/system/openvswitch.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 17:16:21 UTC; 57min ago
   Main PID: 47326 (code=exited, status=0/SUCCESS)
        CPU: 1ms

Feb 02 17:16:21 compute-0 systemd[1]: Starting Open vSwitch...
Feb 02 17:16:21 compute-0 systemd[1]: Finished Open vSwitch.

● ovs-delete-transient-ports.service - Open vSwitch Delete Transient Ports
     Loaded: loaded (/usr/lib/systemd/system/ovs-delete-transient-ports.service; static)
     Active: active (exited) since Mon 2026-02-02 17:16:21 UTC; 57min ago
   Main PID: 47264 (code=exited, status=0/SUCCESS)
Unit pacemaker.service could not be found.
Unit plymouth-quit-wait.service could not be found.
Unit plymouth-start.service could not be found.
        CPU: 24ms

Feb 02 17:16:21 compute-0 systemd[1]: Starting Open vSwitch Delete Transient Ports...
Feb 02 17:16:21 compute-0 systemd[1]: Finished Open vSwitch Delete Transient Ports.

● ovs-vswitchd.service - Open vSwitch Forwarding Unit
     Loaded: loaded (/usr/lib/systemd/system/ovs-vswitchd.service; static)
     Active: active (running) since Mon 2026-02-02 17:16:21 UTC; 57min ago
   Main PID: 47317 (ovs-vswitchd)
         IO: 3.4M read, 404.0K written
      Tasks: 13 (limit: 48560)
     Memory: 247.1M (peak: 249.5M)
        CPU: 12.345s
     CGroup: /system.slice/ovs-vswitchd.service
             └─47317 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach

Feb 02 17:16:21 compute-0 systemd[1]: Starting Open vSwitch Forwarding Unit...
Feb 02 17:16:21 compute-0 ovs-ctl[47307]: Inserting openvswitch module [  OK  ]
Feb 02 17:16:21 compute-0 ovs-ctl[47276]: Starting ovs-vswitchd [  OK  ]
Feb 02 17:16:21 compute-0 ovs-ctl[47276]: Enabling remote OVSDB managers [  OK  ]
Feb 02 17:16:21 compute-0 systemd[1]: Started Open vSwitch Forwarding Unit.
Feb 02 17:16:21 compute-0 ovs-vsctl[47325]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0

● ovsdb-server.service - Open vSwitch Database Unit
     Loaded: loaded (/usr/lib/systemd/system/ovsdb-server.service; static)
     Active: active (running) since Mon 2026-02-02 17:16:21 UTC; 57min ago
   Main PID: 47236 (ovsdb-server)
         IO: 1.2M read, 691.5K written
      Tasks: 1 (limit: 48560)
     Memory: 5.0M (peak: 39.7M)
        CPU: 12.029s
     CGroup: /system.slice/ovsdb-server.service
             └─47236 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach

Feb 02 17:16:21 compute-0 chown[47183]: /usr/bin/chown: cannot access '/run/openvswitch': No such file or directory
Feb 02 17:16:21 compute-0 ovs-ctl[47188]: /etc/openvswitch/conf.db does not exist ... (warning).
Feb 02 17:16:21 compute-0 ovs-ctl[47188]: Creating empty database /etc/openvswitch/conf.db [  OK  ]
Feb 02 17:16:21 compute-0 ovs-ctl[47188]: Starting ovsdb-server [  OK  ]
Feb 02 17:16:21 compute-0 ovs-vsctl[47237]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait -- init -- set Open_vSwitch . db-version=8.5.1
Feb 02 17:16:21 compute-0 ovs-vsctl[47257]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait set Open_vSwitch . ovs-version=3.3.5-115.el9s "external-ids:system-id=\"13051b64-c07e-4136-ad5c-993d3a84d93c\"" "external-ids:rundir=\"/var/run/openvswitch\"" "system-type=\"centos\"" "system-version=\"9\""
Feb 02 17:16:21 compute-0 ovs-ctl[47188]: Configuring Open vSwitch system IDs [  OK  ]
Feb 02 17:16:21 compute-0 ovs-ctl[47188]: Enabling remote OVSDB managers [  OK  ]
Feb 02 17:16:21 compute-0 ovs-vsctl[47263]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0
Feb 02 17:16:21 compute-0 systemd[1]: Started Open vSwitch Database Unit.

● polkit.service - Authorization Manager
     Loaded: loaded (/usr/lib/systemd/system/polkit.service; static)
     Active: active (running) since Mon 2026-02-02 17:15:43 UTC; 57min ago
       Docs: man:polkit(8)
   Main PID: 43458 (polkitd)
         IO: 19.2M read, 0B written
      Tasks: 12 (limit: 48560)
     Memory: 25.1M (peak: 26.4M)
        CPU: 1.818s
     CGroup: /system.slice/polkit.service
             └─43458 /usr/lib/polkit-1/polkitd --no-debug

Feb 02 17:33:00 compute-0 polkitd[43458]: Collecting garbage unconditionally...
Feb 02 17:33:00 compute-0 polkitd[43458]: Loading rules from directory /etc/polkit-1/rules.d
Unit power-profiles-daemon.service could not be found.
Unit rpc-svcgssd.service could not be found.
Feb 02 17:33:00 compute-0 polkitd[43458]: Loading rules from directory /usr/share/polkit-1/rules.d
Feb 02 17:33:00 compute-0 polkitd[43458]: Finished loading, compiling and executing 3 rules
Feb 02 17:34:30 compute-0 polkitd[43458]: Registered Authentication Agent for unix-process:209351:291214 (system bus name :1.2571 [pkttyagent --process 209351 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Feb 02 17:34:30 compute-0 polkitd[43458]: Unregistered Authentication Agent for unix-process:209351:291214 (system bus name :1.2571, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Feb 02 17:34:30 compute-0 polkitd[43458]: Registered Authentication Agent for unix-process:209350:291214 (system bus name :1.2572 [pkttyagent --process 209350 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Feb 02 17:34:30 compute-0 polkitd[43458]: Unregistered Authentication Agent for unix-process:209350:291214 (system bus name :1.2572, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Feb 02 17:34:32 compute-0 polkitd[43458]: Registered Authentication Agent for unix-process:209817:291401 (system bus name :1.2575 [pkttyagent --process 209817 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Feb 02 17:34:32 compute-0 polkitd[43458]: Unregistered Authentication Agent for unix-process:209817:291401 (system bus name :1.2575, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)

○ raid-check.service - RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.service; static)
     Active: inactive (dead)
TriggeredBy: ○ raid-check.timer

○ rbdmap.service - Map RBD devices
     Loaded: loaded (/usr/lib/systemd/system/rbdmap.service; disabled; preset: disabled)
     Active: inactive (dead)

○ rc-local.service - /etc/rc.d/rc.local Compatibility
     Loaded: loaded (/usr/lib/systemd/system/rc-local.service; static)
     Active: inactive (dead)
       Docs: man:systemd-rc-local-generator(8)

○ rescue.service - Rescue Shell
     Loaded: loaded (/usr/lib/systemd/system/rescue.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

○ rpc-gssd.service - RPC security service for NFS client and server
     Loaded: loaded (/usr/lib/systemd/system/rpc-gssd.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 16:46:05 UTC; 1h 27min ago
       Docs: man:rpc.gssd(8)

Feb 02 16:46:05 np0005605476.novalocal systemd[1]: RPC security service for NFS client and server was skipped because of an unmet condition check (ConditionPathExists=/etc/krb5.keytab).

● rpc-statd-notify.service - Notify NFS peers of a restart
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd-notify.service; static)
     Active: active (exited) since Mon 2026-02-02 16:46:07 UTC; 1h 27min ago
       Docs: man:sm-notify(8)
             man:rpc.statd(8)
        CPU: 6ms

Feb 02 16:46:07 np0005605476.novalocal systemd[1]: Starting Notify NFS peers of a restart...
Feb 02 16:46:07 np0005605476.novalocal sm-notify[1005]: Version 2.5.4 starting
Feb 02 16:46:07 np0005605476.novalocal systemd[1]: Started Notify NFS peers of a restart.

○ rpc-statd.service - NFS status monitor for NFSv2/3 locking.
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.statd(8)

● rpcbind.service - RPC Bind
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
TriggeredBy: ● rpcbind.socket
       Docs: man:rpcbind(8)
   Main PID: 702 (rpcbind)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 2.5M (peak: 2.8M)
        CPU: 53ms
     CGroup: /system.slice/rpcbind.service
Unit sntp.service could not be found.
Unit sshd-keygen.service could not be found.
             └─702 /usr/bin/rpcbind -w -f

Feb 02 16:46:03 localhost systemd[1]: Starting RPC Bind...
Feb 02 16:46:03 localhost systemd[1]: Started RPC Bind.

● rsyslog.service - System Logging Service
     Loaded: loaded (/usr/lib/systemd/system/rsyslog.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 16:46:07 UTC; 1h 27min ago
       Docs: man:rsyslogd(8)
             https://www.rsyslog.com/doc/
   Main PID: 1006 (rsyslogd)
         IO: 0B read, 22.6M written
      Tasks: 3 (limit: 48560)
     Memory: 23.9M (peak: 24.4M)
        CPU: 13.999s
     CGroup: /system.slice/rsyslog.service
             └─1006 /usr/sbin/rsyslogd -n

Feb 02 18:03:17 compute-0 rsyslogd[1006]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Feb 02 18:03:17 compute-0 rsyslogd[1006]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Feb 02 18:03:21 compute-0 rsyslogd[1006]: imjournal from <np0005605476:ceph-osd>: begin to drop messages due to rate-limiting
Feb 02 18:03:21 compute-0 rsyslogd[1006]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Feb 02 18:03:26 compute-0 rsyslogd[1006]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Feb 02 18:07:12 compute-0 rsyslogd[1006]: imjournal: 20473 messages lost due to rate-limiting (20000 allowed within 600 seconds)
Feb 02 18:12:55 compute-0 rsyslogd[1006]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Feb 02 18:13:00 compute-0 rsyslogd[1006]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Feb 02 18:13:00 compute-0 rsyslogd[1006]: imjournal from <np0005605476:ceph-osd>: begin to drop messages due to rate-limiting
Feb 02 18:13:04 compute-0 rsyslogd[1006]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]

○ selinux-autorelabel-mark.service - Mark the need to relabel after reboot
     Loaded: loaded (/usr/lib/systemd/system/selinux-autorelabel-mark.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 16:46:03 UTC; 1h 27min ago

Feb 02 16:46:03 localhost systemd[1]: Mark the need to relabel after reboot was skipped because of an unmet condition check (ConditionSecurity=!selinux).

● serial-getty@ttyS0.service - Serial Getty on ttyS0
     Loaded: loaded (/usr/lib/systemd/system/serial-getty@.service; enabled-runtime; preset: disabled)
     Active: active (running) since Mon 2026-02-02 16:46:07 UTC; 1h 27min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1014 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 300.0K (peak: 544.0K)
        CPU: 9ms
     CGroup: /system.slice/system-serial\x2dgetty.slice/serial-getty@ttyS0.service
             └─1014 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

Feb 02 16:46:07 np0005605476.novalocal systemd[1]: Started Serial Getty on ttyS0.

○ sshd-keygen@ecdsa.service - OpenSSH ecdsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 17:33:04 UTC; 40min ago

Feb 02 16:46:03 localhost systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Feb 02 17:33:04 compute-0 systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@ed25519.service - OpenSSH ed25519 Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 17:33:04 UTC; 40min ago

Feb 02 16:46:03 localhost systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Feb 02 17:33:04 compute-0 systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@rsa.service - OpenSSH rsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 17:33:04 UTC; 40min ago

Feb 02 16:46:03 localhost systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Feb 02 17:33:04 compute-0 systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

● sshd.service - OpenSSH server daemon
     Loaded: loaded (/usr/lib/systemd/system/sshd.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 17:33:04 UTC; 40min ago
       Docs: man:sshd(8)
             man:sshd_config(5)
   Main PID: 182591 (sshd)
         IO: 588.0K read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 4.0M (peak: 6.9M)
        CPU: 572ms
     CGroup: /system.slice/sshd.service
             └─182591 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"

Feb 02 18:04:24 compute-0 sshd-session[281431]: Accepted publickey for zuul from 192.168.122.10 port 40758 ssh2: ECDSA SHA256:zZuZTxOZQINmXTIp/w3ufHnLDl00NSjTOoCjsOHVPLY
Feb 02 18:04:24 compute-0 sshd-session[281431]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Feb 02 18:04:24 compute-0 sshd-session[281431]: pam_unix(sshd:session): session closed for user zuul
Feb 02 18:05:15 compute-0 sshd-session[282282]: Invalid user solv from 92.118.39.56 port 54230
Feb 02 18:05:15 compute-0 sshd-session[282282]: Connection closed by invalid user solv 92.118.39.56 port 54230 [preauth]
Feb 02 18:06:32 compute-0 sshd-session[283304]: Connection closed by authenticating user root 157.20.215.3 port 36260 [preauth]
Feb 02 18:07:26 compute-0 sshd-session[284052]: Invalid user solv from 92.118.39.56 port 60132
Feb 02 18:07:27 compute-0 sshd-session[284052]: Connection closed by invalid user solv 92.118.39.56 port 60132 [preauth]
Feb 02 18:12:40 compute-0 sshd-session[287794]: Accepted publickey for zuul from 192.168.122.10 port 60118 ssh2: ECDSA SHA256:zZuZTxOZQINmXTIp/w3ufHnLDl00NSjTOoCjsOHVPLY
Feb 02 18:12:40 compute-0 sshd-session[287794]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)

○ sssd-kcm.service - SSSD Kerberos Cache Manager
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.service; indirect; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ● sssd-kcm.socket
       Docs: man:sssd-kcm(5)

○ sssd.service - System Security Services Daemon
     Loaded: loaded (/usr/lib/systemd/system/sssd.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 16:46:03 UTC; 1h 27min ago

Feb 02 16:46:03 localhost systemd[1]: System Security Services Daemon was skipped because no trigger condition checks were met.
Unit syslog.service could not be found.

○ sysstat-collect.service - system activity accounting tool
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-collect.timer
       Docs: man:sa1(8)

○ sysstat-summary.service - Generate a daily summary of process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-summary.timer
       Docs: man:sa2(8)

○ sysstat.service - Resets System Activity Logs
     Loaded: loaded (/usr/lib/systemd/system/sysstat.service; enabled; preset: enabled)
     Active: inactive (dead)

○ systemd-ask-password-console.service - Dispatch Password Requests to Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-console.path
       Docs: man:systemd-ask-password-console.service(8)

○ systemd-ask-password-wall.service - Forward Password Requests to Wall
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-wall.path
       Docs: man:systemd-ask-password-wall.service(8)

○ systemd-binfmt.service - Set Up Additional Binary Formats
     Loaded: loaded (/usr/lib/systemd/system/systemd-binfmt.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
       Docs: man:systemd-binfmt.service(8)
             man:binfmt.d(5)
             https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Feb 02 16:46:03 localhost systemd[1]: Set Up Additional Binary Formats was skipped because no trigger condition checks were met.

○ systemd-boot-random-seed.service - Update Boot Loader Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-random-seed.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
       Docs: man:systemd-boot-random-seed.service(8)
             man:random(4)

Feb 02 16:46:03 localhost systemd[1]: Update Boot Loader Random Seed was skipped because no trigger condition checks were met.

● systemd-boot-update.service - Automatic Boot Loader Update
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-update.service; enabled; preset: enabled)
     Active: active (exited) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
       Docs: man:bootctl(1)
   Main PID: 698 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Feb 02 16:46:03 localhost systemd[1]: Starting Automatic Boot Loader Update...
Feb 02 16:46:03 localhost bootctl[698]: Couldn't find EFI system partition, skipping.
Feb 02 16:46:03 localhost systemd[1]: Finished Automatic Boot Loader Update.

○ systemd-firstboot.service - First Boot Wizard
     Loaded: loaded (/usr/lib/systemd/system/systemd-firstboot.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
       Docs: man:systemd-firstboot(1)

Feb 02 16:46:02 localhost systemd[1]: First Boot Wizard was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ systemd-fsck-root.service - File System Check on Root Device
     Loaded: loaded (/usr/lib/systemd/system/systemd-fsck-root.service; static)
     Active: inactive (dead) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
   Duration: 1.578s
       Docs: man:systemd-fsck-root.service(8)
   Main PID: 551 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Feb 02 16:46:01 localhost systemd[1]: Starting File System Check on /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8...
Feb 02 16:46:01 localhost systemd-fsck[554]: /usr/sbin/fsck.xfs: XFS file system.
Feb 02 16:46:01 localhost systemd[1]: Finished File System Check on /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8.

● systemd-hostnamed.service - Hostname Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-hostnamed.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-hostnamed.service.d
             └─disable-privatedevices.conf
     Active: active (running) since Mon 2026-02-02 18:13:06 UTC; 17s ago
       Docs: man:systemd-hostnamed.service(8)
             man:hostname(5)
             man:machine-info(5)
             man:org.freedesktop.resolve1(5)
   Main PID: 290803 (systemd-hostnam)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 2.7M (peak: 3.8M)
        CPU: 89ms
     CGroup: /system.slice/systemd-hostnamed.service
             └─290803 /usr/lib/systemd/systemd-hostnamed

Feb 02 18:13:06 compute-0 systemd[1]: Starting Hostname Service...
Feb 02 18:13:06 compute-0 systemd[1]: Started Hostname Service.

● systemd-hwdb-update.service - Rebuild Hardware Database
     Loaded: loaded (/usr/lib/systemd/system/systemd-hwdb-update.service; static)
     Active: active (exited) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
       Docs: man:hwdb(7)
             man:systemd-hwdb(8)
   Main PID: 688 (code=exited, status=0/SUCCESS)
        CPU: 491ms

Feb 02 16:46:02 localhost systemd[1]: Starting Rebuild Hardware Database...
Feb 02 16:46:03 localhost systemd[1]: Finished Rebuild Hardware Database.

○ systemd-initctl.service - initctl Compatibility Daemon
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-initctl.socket
       Docs: man:systemd-initctl.service(8)

● systemd-journal-catalog-update.service - Rebuild Journal Catalog
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-catalog-update.service; static)
     Active: active (exited) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 703 (code=exited, status=0/SUCCESS)
        CPU: 18ms

Feb 02 16:46:03 localhost systemd[1]: Starting Rebuild Journal Catalog...
Feb 02 16:46:03 localhost systemd[1]: Finished Rebuild Journal Catalog.

● systemd-journal-flush.service - Flush Journal to Persistent Storage
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-flush.service; static)
     Active: active (exited) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 689 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Feb 02 16:46:02 localhost systemd[1]: Starting Flush Journal to Persistent Storage...
Feb 02 16:46:02 localhost systemd[1]: Finished Flush Journal to Persistent Storage.

● systemd-journald.service - Journal Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.service; static)
     Active: active (running) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
TriggeredBy: ● systemd-journald-dev-log.socket
             ● systemd-journald.socket
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 679 (systemd-journal)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 107.9M (peak: 116.0M)
        CPU: 13.363s
     CGroup: /system.slice/systemd-journald.service
             └─679 /usr/lib/systemd/systemd-journald

Feb 02 16:46:02 localhost systemd-journald[679]: Journal started
Feb 02 16:46:02 localhost systemd-journald[679]: Runtime Journal (/run/log/journal/bf0bc0bb03de29b24cba1cc9599cf5d0) is 8.0M, max 153.6M, 145.6M free.
Feb 02 16:46:02 localhost systemd[1]: systemd-journald.service: Deactivated successfully.
Feb 02 16:46:02 localhost systemd-journald[679]: Runtime Journal (/run/log/journal/bf0bc0bb03de29b24cba1cc9599cf5d0) is 8.0M, max 153.6M, 145.6M free.
Feb 02 16:46:02 localhost systemd-journald[679]: Received client request to flush runtime journal.

● systemd-logind.service - User Login Management
     Loaded: loaded (/usr/lib/systemd/system/systemd-logind.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-logind.service.d
             └─10-grub2-logind-service.conf
     Active: active (running) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
       Docs: man:sd-login(3)
             man:systemd-logind.service(8)
             man:logind.conf(5)
             man:org.freedesktop.login1(5)
   Main PID: 799 (systemd-logind)
     Status: "Processing requests..."
         IO: 4.0K read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 6.3M (peak: 6.8M)
        CPU: 3.324s
     CGroup: /system.slice/systemd-logind.service
             └─799 /usr/lib/systemd/systemd-logind

Feb 02 18:02:58 compute-0 systemd-logind[799]: New session 51 of user zuul.
Feb 02 18:04:23 compute-0 systemd-logind[799]: Session 51 logged out. Waiting for processes to exit.
Feb 02 18:04:23 compute-0 systemd-logind[799]: Removed session 51.
Feb 02 18:04:23 compute-0 systemd-logind[799]: New session 52 of user zuul.
Feb 02 18:04:23 compute-0 systemd-logind[799]: Session 52 logged out. Waiting for processes to exit.
Feb 02 18:04:23 compute-0 systemd-logind[799]: Removed session 52.
Feb 02 18:04:24 compute-0 systemd-logind[799]: New session 53 of user zuul.
Feb 02 18:04:24 compute-0 systemd-logind[799]: Session 53 logged out. Waiting for processes to exit.
Feb 02 18:04:24 compute-0 systemd-logind[799]: Removed session 53.
Feb 02 18:12:40 compute-0 systemd-logind[799]: New session 54 of user zuul.

○ systemd-machine-id-commit.service - Commit a transient machine-id on disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-machine-id-commit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
       Docs: man:systemd-machine-id-commit.service(8)

Feb 02 16:46:03 localhost systemd[1]: Commit a transient machine-id on disk was skipped because of an unmet condition check (ConditionPathIsMountPoint=/etc/machine-id).

● systemd-machined.service - Virtual Machine and Container Registration Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-machined.service; static)
     Active: active (running) since Mon 2026-02-02 17:34:24 UTC; 38min ago
       Docs: man:systemd-machined.service(8)
             man:org.freedesktop.machine1(5)
   Main PID: 208080 (systemd-machine)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 1.5M (peak: 2.0M)
        CPU: 1.623s
     CGroup: /system.slice/systemd-machined.service
             └─208080 /usr/lib/systemd/systemd-machined

Feb 02 17:58:42 compute-0 systemd-machined[208080]: New machine qemu-25-instance-00000019.
Feb 02 17:59:04 compute-0 systemd-machined[208080]: Machine qemu-25-instance-00000019 terminated.
Feb 02 17:59:22 compute-0 systemd-machined[208080]: New machine qemu-26-instance-0000001b.
Feb 02 17:59:22 compute-0 systemd-machined[208080]: New machine qemu-27-instance-0000001a.
Feb 02 17:59:45 compute-0 systemd-machined[208080]: Machine qemu-26-instance-0000001b terminated.
Feb 02 18:00:00 compute-0 systemd-machined[208080]: New machine qemu-28-instance-0000001c.
Feb 02 18:00:01 compute-0 systemd-machined[208080]: Machine qemu-27-instance-0000001a terminated.
Feb 02 18:00:26 compute-0 systemd-machined[208080]: Machine qemu-28-instance-0000001c terminated.
Feb 02 18:00:48 compute-0 systemd-machined[208080]: New machine qemu-29-instance-0000001d.
Feb 02 18:01:55 compute-0 systemd-machined[208080]: Machine qemu-29-instance-0000001d terminated.

● systemd-modules-load.service - Load Kernel Modules
     Loaded: loaded (/usr/lib/systemd/system/systemd-modules-load.service; static)
     Active: active (exited) since Mon 2026-02-02 17:35:51 UTC; 37min ago
       Docs: man:systemd-modules-load.service(8)
             man:modules-load.d(5)
   Main PID: 222739 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Feb 02 17:35:51 compute-0 systemd[1]: Starting Load Kernel Modules...
Feb 02 17:35:51 compute-0 systemd[1]: Finished Load Kernel Modules.

● systemd-network-generator.service - Generate network units from Kernel command line
     Loaded: loaded (/usr/lib/systemd/system/systemd-network-generator.service; enabled; preset: enabled)
     Active: active (exited) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
Unit systemd-networkd-wait-online.service could not be found.
       Docs: man:systemd-network-generator.service(8)
   Main PID: 680 (code=exited, status=0/SUCCESS)
        CPU: 11ms

Feb 02 16:46:02 localhost systemd[1]: Finished Generate network units from Kernel command line.

○ systemd-pcrmachine.service - TPM2 PCR Machine ID Measurement
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrmachine.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
       Docs: man:systemd-pcrmachine.service(8)

○ systemd-pcrphase-initrd.service - TPM2 PCR Barrier (initrd)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-initrd.service; static)
     Active: inactive (dead)
       Docs: man:systemd-pcrphase-initrd.service(8)

○ systemd-pcrphase-sysinit.service - TPM2 PCR Barrier (Initialization)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-sysinit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
       Docs: man:systemd-pcrphase-sysinit.service(8)

Feb 02 16:46:03 localhost systemd[1]: TPM2 PCR Barrier (Initialization) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pcrphase.service - TPM2 PCR Barrier (User)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 16:46:05 UTC; 1h 27min ago
       Docs: man:systemd-pcrphase.service(8)

Feb 02 16:46:05 np0005605476.novalocal systemd[1]: TPM2 PCR Barrier (User) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pstore.service - Platform Persistent Storage Archival
     Loaded: loaded (/usr/lib/systemd/system/systemd-pstore.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
       Docs: man:systemd-pstore(8)

Feb 02 16:46:02 localhost systemd[1]: Platform Persistent Storage Archival was skipped because of an unmet condition check (ConditionDirectoryNotEmpty=/sys/fs/pstore).

● systemd-random-seed.service - Load/Save OS Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-random-seed.service; static)
     Active: active (exited) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
       Docs: man:systemd-random-seed.service(8)
             man:random(4)
   Main PID: 690 (code=exited, status=0/SUCCESS)
        CPU: 6ms

Feb 02 16:46:02 localhost systemd[1]: Starting Load/Save OS Random Seed...
Feb 02 16:46:02 localhost systemd[1]: Finished Load/Save OS Random Seed.

● systemd-remount-fs.service - Remount Root and Kernel File Systems
     Loaded: loaded (/usr/lib/systemd/system/systemd-remount-fs.service; enabled-runtime; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
       Docs: man:systemd-remount-fs.service(8)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
   Main PID: 681 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Feb 02 16:46:02 localhost systemd[1]: Finished Remount Root and Kernel File Systems.

○ systemd-repart.service - Repartition Root Disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-repart.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
       Docs: man:systemd-repart.service(8)

○ systemd-rfkill.service - Load/Save RF Kill Switch Status
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-rfkill.socket
       Docs: man:systemd-rfkill.service(8)

● systemd-sysctl.service - Apply Kernel Variables
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysctl.service; static)
     Active: active (exited) since Mon 2026-02-02 17:15:52 UTC; 57min ago
Unit systemd-timesyncd.service could not be found.
Unit systemd-tmpfiles.service could not be found.
       Docs: man:systemd-sysctl.service(8)
             man:sysctl.d(5)
   Main PID: 44988 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Feb 02 17:15:52 compute-0 systemd[1]: Starting Apply Kernel Variables...
Feb 02 17:15:52 compute-0 systemd[1]: Finished Apply Kernel Variables.

○ systemd-sysext.service - Merge System Extension Images into /usr/ and /opt/
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysext.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd-sysext.service(8)

● systemd-sysusers.service - Create System Users
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysusers.service; static)
     Active: active (exited) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
       Docs: man:sysusers.d(5)
             man:systemd-sysusers.service(8)
   Main PID: 691 (code=exited, status=0/SUCCESS)
        CPU: 27ms

Feb 02 16:46:02 localhost systemd[1]: Starting Create System Users...
Feb 02 16:46:02 localhost systemd[1]: Finished Create System Users.

○ systemd-tmpfiles-clean.service - Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.service; static)
     Active: inactive (dead) since Mon 2026-02-02 17:01:08 UTC; 1h 12min ago
TriggeredBy: ● systemd-tmpfiles-clean.timer
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 30001 (code=exited, status=0/SUCCESS)
        CPU: 44ms

Feb 02 17:01:08 compute-0 systemd[1]: Starting Cleanup of Temporary Directories...
Feb 02 17:01:08 compute-0 systemd[1]: systemd-tmpfiles-clean.service: Deactivated successfully.
Feb 02 17:01:08 compute-0 systemd[1]: Finished Cleanup of Temporary Directories.

● systemd-tmpfiles-setup-dev.service - Create Static Device Nodes in /dev
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup-dev.service; static)
     Active: active (exited) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 694 (code=exited, status=0/SUCCESS)
        CPU: 31ms

Feb 02 16:46:02 localhost systemd[1]: Starting Create Static Device Nodes in /dev...
Feb 02 16:46:03 localhost systemd[1]: Finished Create Static Device Nodes in /dev.

● systemd-tmpfiles-setup.service - Create Volatile Files and Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup.service; static)
     Active: active (exited) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 699 (code=exited, status=0/SUCCESS)
        CPU: 71ms

Feb 02 16:46:03 localhost systemd[1]: Starting Create Volatile Files and Directories...
Feb 02 16:46:03 localhost systemd[1]: Finished Create Volatile Files and Directories.

● systemd-udev-settle.service - Wait for udev To Complete Device Initialization
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-settle.service; static)
     Active: active (exited) since Mon 2026-02-02 17:35:46 UTC; 37min ago
       Docs: man:systemd-udev-settle.service(8)
   Main PID: 221814 (code=exited, status=0/SUCCESS)
        CPU: 16ms

Feb 02 17:35:46 compute-0 systemd[1]: Starting Wait for udev To Complete Device Initialization...
Feb 02 17:35:46 compute-0 udevadm[221814]: systemd-udev-settle.service is deprecated. Please fix multipathd.service not to pull it in.
Feb 02 17:35:46 compute-0 systemd[1]: Finished Wait for udev To Complete Device Initialization.

● systemd-udev-trigger.service - Coldplug All udev Devices
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-trigger.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-udev-trigger.service.d
             └─systemd-udev-trigger-no-reload.conf
     Active: active (exited) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
       Docs: man:udev(7)
             man:systemd-udevd.service(8)
   Main PID: 684 (code=exited, status=0/SUCCESS)
        CPU: 81ms

Feb 02 16:46:02 localhost systemd[1]: Finished Coldplug All udev Devices.

● systemd-udevd.service - Rule-based Manager for Device Events and Files
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd.service; static)
     Active: active (running) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
TriggeredBy: ● systemd-udevd-control.socket
             ● systemd-udevd-kernel.socket
       Docs: man:systemd-udevd.service(8)
             man:udev(7)
   Main PID: 732 (systemd-udevd)
     Status: "Processing with 32 children at max"
         IO: 213.7M read, 103.7M written
      Tasks: 1
     Memory: 65.0M (peak: 101.9M)
        CPU: 14.694s
     CGroup: /system.slice/systemd-udevd.service
             └─udev
               └─732 /usr/lib/systemd/systemd-udevd

Feb 02 18:12:48 compute-0 lvm[288414]: PV /dev/loop5 online, VG ceph_vg2 is complete.
Feb 02 18:12:48 compute-0 lvm[288414]: VG ceph_vg2 finished
Feb 02 18:12:48 compute-0 lvm[288442]: PV /dev/loop4 online, VG ceph_vg1 is complete.
Feb 02 18:12:48 compute-0 lvm[288442]: VG ceph_vg1 finished
Feb 02 18:13:13 compute-0 lvm[292218]: PV /dev/loop4 online, VG ceph_vg1 is complete.
Feb 02 18:13:13 compute-0 lvm[292218]: VG ceph_vg1 finished
Feb 02 18:13:13 compute-0 lvm[292217]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Feb 02 18:13:13 compute-0 lvm[292217]: VG ceph_vg0 finished
Feb 02 18:13:13 compute-0 lvm[292222]: PV /dev/loop5 online, VG ceph_vg2 is complete.
Feb 02 18:13:13 compute-0 lvm[292222]: VG ceph_vg2 finished

● systemd-update-done.service - Update is Completed
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-done.service; static)
     Active: active (exited) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
       Docs: man:systemd-update-done.service(8)
   Main PID: 734 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Feb 02 16:46:03 localhost systemd[1]: Starting Update is Completed...
Feb 02 16:46:03 localhost systemd[1]: Finished Update is Completed.

○ systemd-update-utmp-runlevel.service - Record Runlevel Change in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp-runlevel.service; static)
     Active: inactive (dead) since Mon 2026-02-02 16:46:07 UTC; 1h 27min ago
       Docs: man:systemd-update-utmp-runlevel.service(8)
             man:utmp(5)
   Main PID: 1023 (code=exited, status=0/SUCCESS)
        CPU: 5ms

Feb 02 16:46:07 np0005605476.novalocal systemd[1]: Starting Record Runlevel Change in UTMP...
Feb 02 16:46:07 np0005605476.novalocal systemd[1]: systemd-update-utmp-runlevel.service: Deactivated successfully.
Feb 02 16:46:07 np0005605476.novalocal systemd[1]: Finished Record Runlevel Change in UTMP.

● systemd-update-utmp.service - Record System Boot/Shutdown in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp.service; static)
     Active: active (exited) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
       Docs: man:systemd-update-utmp.service(8)
             man:utmp(5)
   Main PID: 731 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Feb 02 16:46:03 localhost systemd[1]: Starting Record System Boot/Shutdown in UTMP...
Feb 02 16:46:03 localhost systemd[1]: Finished Record System Boot/Shutdown in UTMP.

● systemd-user-sessions.service - Permit User Sessions
     Loaded: loaded (/usr/lib/systemd/system/systemd-user-sessions.service; static)
     Active: active (exited) since Mon 2026-02-02 16:46:07 UTC; 1h 27min ago
       Docs: man:systemd-user-sessions.service(8)
   Main PID: 1008 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Feb 02 16:46:07 np0005605476.novalocal systemd[1]: Starting Permit User Sessions...
Feb 02 16:46:07 np0005605476.novalocal systemd[1]: Finished Permit User Sessions.

○ systemd-vconsole-setup.service - Setup Virtual Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-vconsole-setup.service; static)
     Active: inactive (dead) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
   Duration: 1.816s
       Docs: man:systemd-vconsole-setup.service(8)
             man:vconsole.conf(5)
   Main PID: 312 (code=exited, status=0/SUCCESS)
        CPU: 171ms

Feb 02 16:46:00 localhost systemd[1]: Finished Setup Virtual Console.
Unit tlp.service could not be found.
Feb 02 16:46:02 localhost systemd[1]: systemd-vconsole-setup.service: Deactivated successfully.
Feb 02 16:46:02 localhost systemd[1]: Stopped Setup Virtual Console.
Notice: journal has been rotated since unit was started, output may be incomplete.

● tuned.service - Dynamic System Tuning Daemon
     Loaded: loaded (/usr/lib/systemd/system/tuned.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 17:25:26 UTC; 47min ago
       Docs: man:tuned(8)
             man:tuned.conf(5)
             man:tuned-adm(8)
   Main PID: 106772 (tuned)
         IO: 0B read, 0B written
      Tasks: 4 (limit: 48560)
     Memory: 13.8M (peak: 16.4M)
        CPU: 1.302s
     CGroup: /system.slice/tuned.service
             └─106772 /usr/bin/python3 -Es /usr/sbin/tuned -l -P

Feb 02 17:25:26 compute-0 systemd[1]: Starting Dynamic System Tuning Daemon...
Feb 02 17:25:26 compute-0 systemd[1]: Started Dynamic System Tuning Daemon.

○ unbound-anchor.service - update of the root trust anchor for DNSSEC validation in unbound
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.service; static)
     Active: inactive (dead)
TriggeredBy: ● unbound-anchor.timer
       Docs: man:unbound-anchor(8)

● user-runtime-dir@1000.service - User Runtime Directory /run/user/1000
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Mon 2026-02-02 16:46:28 UTC; 1h 26min ago
       Docs: man:user@.service(5)
   Main PID: 4306 (code=exited, status=0/SUCCESS)
        CPU: 20ms

Feb 02 16:46:28 np0005605476.novalocal systemd[1]: Starting User Runtime Directory /run/user/1000...
Feb 02 16:46:28 np0005605476.novalocal systemd[1]: Finished User Runtime Directory /run/user/1000.

● user-runtime-dir@42477.service - User Runtime Directory /run/user/42477
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Mon 2026-02-02 17:20:42 UTC; 52min ago
       Docs: man:user@.service(5)
   Main PID: 76565 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Feb 02 17:20:42 compute-0 systemd[1]: Starting User Runtime Directory /run/user/42477...
Feb 02 17:20:42 compute-0 systemd[1]: Finished User Runtime Directory /run/user/42477.

● user@1000.service - User Manager for UID 1000
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Mon 2026-02-02 16:46:29 UTC; 1h 26min ago
       Docs: man:user@.service(5)
   Main PID: 4307 (systemd)
     Status: "Ready."
         IO: 676.0K read, 8.0K written
      Tasks: 5
     Memory: 7.4M (peak: 14.9M)
        CPU: 4.635s
     CGroup: /user.slice/user-1000.slice/user@1000.service
             ├─app.slice
             │ └─dbus-broker.service
             │   ├─13900 /usr/bin/dbus-broker-launch --scope user
             │   └─13910 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             ├─init.scope
             │ ├─4307 /usr/lib/systemd/systemd --user
             │ └─4309 "(sd-pam)"
             └─user.slice
               └─podman-pause-6e894c72.scope
                 └─13874 catatonit -P

Feb 02 16:56:56 np0005605476.novalocal dbus-broker-launch[13900]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +31: Eavesdropping is deprecated and ignored
Feb 02 16:56:56 np0005605476.novalocal dbus-broker-launch[13900]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +33: Eavesdropping is deprecated and ignored
Feb 02 16:56:56 np0005605476.novalocal systemd[4307]: Started D-Bus User Message Bus.
Feb 02 16:56:56 np0005605476.novalocal dbus-broker-lau[13900]: Ready
Feb 02 16:56:56 np0005605476.novalocal systemd[4307]: selinux: avc:  op=load_policy lsm=selinux seqno=4 res=1
Feb 02 16:56:56 np0005605476.novalocal systemd[4307]: Created slice Slice /user.
Feb 02 16:56:56 np0005605476.novalocal systemd[4307]: podman-13869.scope: unit configures an IP firewall, but not running as root.
Feb 02 16:56:56 np0005605476.novalocal systemd[4307]: (This warning is only shown for the first unit using IP firewalling.)
Feb 02 16:56:56 np0005605476.novalocal systemd[4307]: Started podman-13869.scope.
Feb 02 16:56:57 np0005605476.novalocal systemd[4307]: Started podman-pause-6e894c72.scope.

● user@42477.service - User Manager for UID 42477
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Mon 2026-02-02 17:20:42 UTC; 52min ago
       Docs: man:user@.service(5)
   Main PID: 76566 (systemd)
     Status: "Ready."
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 9.3M (peak: 10.9M)
        CPU: 3.157s
     CGroup: /user.slice/user-42477.slice/user@42477.service
             └─init.scope
               ├─76566 /usr/lib/systemd/systemd --user
               └─76568 "(sd-pam)"

Feb 02 17:20:42 compute-0 systemd[76566]: Reached target Sockets.
Feb 02 17:20:42 compute-0 systemd[76566]: Reached target Basic System.
Feb 02 17:20:42 compute-0 systemd[76566]: Reached target Main User Target.
Feb 02 17:20:42 compute-0 systemd[76566]: Startup finished in 116ms.
Feb 02 17:20:42 compute-0 systemd[1]: Started User Manager for UID 42477.
Feb 02 17:22:55 compute-0 systemd[76566]: Starting Mark boot as successful...
Feb 02 17:22:55 compute-0 systemd[76566]: Finished Mark boot as successful.
Feb 02 17:26:28 compute-0 systemd[76566]: Created slice User Background Tasks Slice.
Feb 02 17:26:28 compute-0 systemd[76566]: Starting Cleanup of User's Temporary Files and Directories...
Feb 02 17:26:28 compute-0 systemd[76566]: Finished Cleanup of User's Temporary Files and Directories.

○ virtinterfaced.service - libvirt interface daemon
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtinterfaced-ro.socket
             ○ virtinterfaced-admin.socket
             ○ virtinterfaced.socket
       Docs: man:virtinterfaced(8)
             https://libvirt.org/

○ virtlockd.service - libvirt locking daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtlockd-admin.socket
             ● virtlockd.socket
       Docs: man:virtlockd(8)
             https://libvirt.org/

● virtlogd.service - libvirt logging daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 17:34:21 UTC; 39min ago
TriggeredBy: ● virtlogd.socket
             ● virtlogd-admin.socket
       Docs: man:virtlogd(8)
             https://libvirt.org/
   Main PID: 207424 (virtlogd)
         IO: 644.0K read, 2.4M written
      Tasks: 1 (limit: 48560)
     Memory: 3.6M (peak: 4.1M)
        CPU: 29.044s
     CGroup: /system.slice/virtlogd.service
             └─207424 /usr/sbin/virtlogd

Feb 02 17:34:21 compute-0 systemd[1]: Starting libvirt logging daemon...
Feb 02 17:34:21 compute-0 systemd[1]: Started libvirt logging daemon.

○ virtnetworkd.service - libvirt network daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnetworkd-ro.socket
             ○ virtnetworkd-admin.socket
             ○ virtnetworkd.socket
       Docs: man:virtnetworkd(8)
             https://libvirt.org/

● virtnodedevd.service - libvirt nodedev daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 17:37:24 UTC; 35min ago
TriggeredBy: ● virtnodedevd-admin.socket
             ● virtnodedevd-ro.socket
             ● virtnodedevd.socket
       Docs: man:virtnodedevd(8)
             https://libvirt.org/
   Main PID: 240340 (virtnodedevd)
         IO: 4.3M read, 0B written
      Tasks: 20 (limit: 48560)
     Memory: 10.1M (peak: 11.4M)
        CPU: 2.627s
     CGroup: /system.slice/virtnodedevd.service
             └─240340 /usr/sbin/virtnodedevd --timeout 120

Feb 02 17:37:24 compute-0 systemd[1]: Starting libvirt nodedev daemon...
Feb 02 17:37:24 compute-0 systemd[1]: Started libvirt nodedev daemon.

○ virtnwfilterd.service - libvirt nwfilter daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnwfilterd-admin.socket
             ○ virtnwfilterd.socket
             ○ virtnwfilterd-ro.socket
       Docs: man:virtnwfilterd(8)
             https://libvirt.org/

○ virtproxyd.service - libvirt proxy daemon
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.service; enabled; preset: disabled)
     Active: inactive (dead) since Mon 2026-02-02 17:36:23 UTC; 37min ago
   Duration: 2min 8ms
TriggeredBy: ● virtproxyd-ro.socket
             ● virtproxyd.socket
             ● virtproxyd-admin.socket
             ● virtproxyd-tls.socket
       Docs: man:virtproxyd(8)
             https://libvirt.org/
   Main PID: 207861 (code=exited, status=0/SUCCESS)
        CPU: 54ms

Feb 02 17:34:23 compute-0 systemd[1]: Starting libvirt proxy daemon...
Feb 02 17:34:23 compute-0 systemd[1]: Started libvirt proxy daemon.
Feb 02 17:36:23 compute-0 systemd[1]: virtproxyd.service: Deactivated successfully.

● virtqemud.service - libvirt QEMU daemon
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 17:37:18 UTC; 36min ago
TriggeredBy: ● virtqemud-ro.socket
             ● virtqemud.socket
             ● virtqemud-admin.socket
       Docs: man:virtqemud(8)
             https://libvirt.org/
   Main PID: 239321 (virtqemud)
         IO: 45.7M read, 1.2M written
      Tasks: 19 (limit: 32768)
     Memory: 69.8M (peak: 88.2M)
        CPU: 7.149s
     CGroup: /system.slice/virtqemud.service
             └─239321 /usr/sbin/virtqemud --timeout 120

Feb 02 17:37:19 compute-0 virtqemud[239321]: libvirt version: 11.10.0, package: 3.el9 (builder@centos.org, 2026-01-13-15:14:57, )
Feb 02 17:37:19 compute-0 virtqemud[239321]: hostname: compute-0
Feb 02 17:37:19 compute-0 virtqemud[239321]: End of file while reading data: Input/output error
Feb 02 18:03:10 compute-0 virtqemud[239321]: Failed to connect socket to '/var/run/libvirt/virtnetworkd-sock-ro': No such file or directory
Feb 02 18:03:10 compute-0 virtqemud[239321]: Failed to connect socket to '/var/run/libvirt/virtnwfilterd-sock-ro': No such file or directory
Feb 02 18:03:10 compute-0 virtqemud[239321]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory
Feb 02 18:03:49 compute-0 virtqemud[239321]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory
Feb 02 18:12:47 compute-0 virtqemud[239321]: Failed to connect socket to '/var/run/libvirt/virtnetworkd-sock-ro': No such file or directory
Feb 02 18:12:47 compute-0 virtqemud[239321]: Failed to connect socket to '/var/run/libvirt/virtnwfilterd-sock-ro': No such file or directory
Feb 02 18:12:47 compute-0 virtqemud[239321]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory

● virtsecretd.service - libvirt secret daemon
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 17:45:17 UTC; 28min ago
TriggeredBy: ● virtsecretd.socket
             ● virtsecretd-ro.socket
             ● virtsecretd-admin.socket
       Docs: man:virtsecretd(8)
             https://libvirt.org/
   Main PID: 246591 (virtsecretd)
         IO: 8.0K read, 176.0K written
      Tasks: 18 (limit: 48560)
     Memory: 4.1M (peak: 5.1M)
        CPU: 375ms
     CGroup: /system.slice/virtsecretd.service
             └─246591 /usr/sbin/virtsecretd --timeout 120

Unit ypbind.service could not be found.
Unit yppasswdd.service could not be found.
Unit ypserv.service could not be found.
Unit ypxfrd.service could not be found.
Feb 02 17:45:17 compute-0 systemd[1]: Starting libvirt secret daemon...
Feb 02 17:45:17 compute-0 systemd[1]: Started libvirt secret daemon.

○ virtstoraged.service - libvirt storage daemon
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtstoraged-ro.socket
             ○ virtstoraged-admin.socket
             ○ virtstoraged.socket
       Docs: man:virtstoraged(8)
             https://libvirt.org/

● -.slice - Root Slice
     Loaded: loaded
     Active: active since Mon 2026-02-02 16:45:59 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:45:59 UTC; 1h 27min ago
       Docs: man:systemd.special(7)
      Tasks: 1432
     Memory: 3.5G
        CPU: 48min 30.020s
     CGroup: /
             ├─294028 turbostat --debug sleep 10
             ├─294037 sleep 10
             ├─init.scope
             │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
             ├─machine.slice
             │ ├─libpod-17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681.scope
             │ │ └─container
             │ │   ├─239848 dumb-init --single-child -- kolla_start
             │ │   ├─239853 /usr/bin/python3 /usr/bin/nova-compute
             │ │   ├─246527 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmp70ca1rbh/privsep.sock
             │ │   └─249256 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmp9qdvacqx/privsep.sock
             │ ├─libpod-70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783.scope
             │ │ └─container
             │ │   ├─146043 dumb-init --single-child -- kolla_start
             │ │   └─146046 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
             │ └─libpod-983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b.scope
             │   └─container
             │     ├─155388 dumb-init --single-child -- kolla_start
             │     ├─155391 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │     ├─155696 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │     ├─155891 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpty0fqf9d/privsep.sock
             │     ├─246686 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpx1bw3qnr/privsep.sock
             │     └─246720 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmp037046cp/privsep.sock
             ├─system.slice
             │ ├─NetworkManager.service
             │ │ └─49022 /usr/sbin/NetworkManager --no-daemon
             │ ├─auditd.service
             │ │ ├─704 /sbin/auditd
             │ │ └─706 /usr/sbin/sedispatch
             │ ├─chronyd.service
             │ │ └─58579 /usr/sbin/chronyd -F 2
             │ ├─crond.service
             │ │ ├─ 1010 /usr/sbin/crond -n
             │ │ └─29996 /usr/sbin/anacron -s
             │ ├─dbus-broker.service
             │ │ ├─770 /usr/bin/dbus-broker-launch --scope system --audit
             │ │ └─777 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             │ ├─edpm_nova_compute.service
             │ │ └─239846 /usr/bin/conmon --api-version 1 -c 17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681 -u 17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681/userdata -p /run/containers/storage/overlay-containers/17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681
             │ ├─edpm_ovn_controller.service
             │ │ └─146041 /usr/bin/conmon --api-version 1 -c 70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783 -u 70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783/userdata -p /run/containers/storage/overlay-containers/70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783
             │ ├─edpm_ovn_metadata_agent.service
             │ │ └─155386 /usr/bin/conmon --api-version 1 -c 983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b -u 983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b/userdata -p /run/containers/storage/overlay-containers/983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b
             │ ├─gssproxy.service
             │ │ └─873 /usr/sbin/gssproxy -D
             │ ├─irqbalance.service
             │ │ └─795 /usr/sbin/irqbalance
             │ ├─iscsid.service
             │ │ └─224391 /usr/sbin/iscsid -f
             │ ├─multipathd.service
             │ │ └─224550 /sbin/multipathd -d -s
             │ ├─ovs-vswitchd.service
             │ │ └─47317 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             │ ├─ovsdb-server.service
             │ │ └─47236 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             │ ├─polkit.service
             │ │ └─43458 /usr/lib/polkit-1/polkitd --no-debug
             │ ├─rpcbind.service
             │ │ └─702 /usr/bin/rpcbind -w -f
             │ ├─rsyslog.service
             │ │ └─1006 /usr/sbin/rsyslogd -n
             │ ├─sshd.service
             │ │ └─182591 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             │ ├─system-ceph\x2deb48d0ef\x2d3496\x2d563c\x2db73d\x2d661fb962013e.slice
             │ │ ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@crash.compute-0.service
             │ │ │ ├─libpod-payload-43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b
             │ │ │ │ ├─80152 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ │ └─80154 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ └─runtime
             │ │ │   └─80150 /usr/bin/conmon --api-version 1 -c 43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b -u 43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b/userdata -p /run/containers/storage/overlay-containers/43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b
             │ │ ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mds.cephfs.compute-0.vvdoei.service
             │ │ │ ├─libpod-payload-30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56
             │ │ │ │ ├─95612 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.vvdoei -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─95614 /usr/bin/ceph-mds -n mds.cephfs.compute-0.vvdoei -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─95610 /usr/bin/conmon --api-version 1 -c 30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56 -u 30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56/userdata -p /run/containers/storage/overlay-containers/30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-mds-cephfs-compute-0-vvdoei --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mds.cephfs.compute-0.vvdoei.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56
             │ │ ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mgr.compute-0.hccdnu.service
             │ │ │ ├─libpod-payload-f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c
             │ │ │ │ ├─75491 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.hccdnu -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─75493 /usr/bin/ceph-mgr -n mgr.compute-0.hccdnu -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─75489 /usr/bin/conmon --api-version 1 -c f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c -u f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c/userdata -p /run/containers/storage/overlay-containers/f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-mgr-compute-0-hccdnu --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mgr.compute-0.hccdnu.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c
             │ │ ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mon.compute-0.service
             │ │ │ ├─libpod-payload-49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26
             │ │ │ │ ├─75195 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ │ └─75197 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─75193 /usr/bin/conmon --api-version 1 -c 49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26 -u 49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26/userdata -p /run/containers/storage/overlay-containers/49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26
             │ │ ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.0.service
             │ │ │ ├─libpod-payload-5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f
             │ │ │ │ ├─85694 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ │ └─85696 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ └─runtime
             │ │ │   └─85692 /usr/bin/conmon --api-version 1 -c 5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f -u 5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f/userdata -p /run/containers/storage/overlay-containers/5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f
             │ │ ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.1.service
             │ │ │ ├─libpod-payload-849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42
             │ │ │ │ ├─86735 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ │ └─86737 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ └─runtime
             │ │ │   └─86733 /usr/bin/conmon --api-version 1 -c 849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42 -u 849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42/userdata -p /run/containers/storage/overlay-containers/849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42
             │ │ ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.2.service
             │ │ │ ├─libpod-payload-49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071
             │ │ │ │ ├─87790 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ │ └─87792 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ └─runtime
             │ │ │   └─87788 /usr/bin/conmon --api-version 1 -c 49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071 -u 49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071/userdata -p /run/containers/storage/overlay-containers/49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071
             │ │ └─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@rgw.rgw.compute-0.molmny.service
             │ │   ├─libpod-payload-31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e
             │ │   │ ├─95127 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.molmny -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
             │ │   │ └─95129 /usr/bin/radosgw -n client.rgw.rgw.compute-0.molmny -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
             │ │   └─runtime
             │ │     └─95125 /usr/bin/conmon --api-version 1 -c 31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e -u 31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e/userdata -p /run/containers/storage/overlay-containers/31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-rgw-rgw-compute-0-molmny --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@rgw.rgw.compute-0.molmny.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e
             │ ├─system-getty.slice
             │ │ └─getty@tty1.service
             │ │   └─1013 /sbin/agetty -o "-p -- \\u" --noclear - linux
             │ ├─system-serial\x2dgetty.slice
             │ │ └─serial-getty@ttyS0.service
             │ │   └─1014 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             │ ├─systemd-hostnamed.service
             │ │ └─290803 /usr/lib/systemd/systemd-hostnamed
             │ ├─systemd-journald.service
             │ │ └─679 /usr/lib/systemd/systemd-journald
             │ ├─systemd-logind.service
             │ │ └─799 /usr/lib/systemd/systemd-logind
             │ ├─systemd-machined.service
             │ │ └─208080 /usr/lib/systemd/systemd-machined
             │ ├─systemd-udevd.service
             │ │ └─udev
             │ │   └─732 /usr/lib/systemd/systemd-udevd
             │ ├─tuned.service
             │ │ └─106772 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             │ ├─virtlogd.service
             │ │ └─207424 /usr/sbin/virtlogd
             │ ├─virtnodedevd.service
             │ │ └─240340 /usr/sbin/virtnodedevd --timeout 120
             │ ├─virtqemud.service
             │ │ └─239321 /usr/sbin/virtqemud --timeout 120
             │ └─virtsecretd.service
             │   └─246591 /usr/sbin/virtsecretd --timeout 120
             └─user.slice
               ├─user-1000.slice
               │ ├─session-1.scope
               │ │ └─4520 /usr/bin/python3
               │ ├─session-54.scope
               │ │ ├─287794 "sshd-session: zuul [priv]"
               │ │ ├─287797 "sshd-session: zuul@notty"
               │ │ ├─287798 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
               │ │ ├─287822 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
               │ │ ├─294026 timeout 15s turbostat --debug sleep 10
               │ │ ├─294374 timeout 300s systemctl status --all
               │ │ ├─294375 systemctl status --all
               │ │ ├─294399 timeout 300s semanage module -l
               │ │ ├─294400 /usr/bin/python3 -EsI /usr/sbin/semanage module -l
               │ │ ├─294403 timeout 300s ceph mon dump --format json-pretty
               │ │ └─294404 /usr/bin/python3 -s /usr/bin/ceph mon dump --format json-pretty
               │ └─user@1000.service
               │   ├─app.slice
               │   │ └─dbus-broker.service
               │   │   ├─13900 /usr/bin/dbus-broker-launch --scope user
               │   │   └─13910 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               │   ├─init.scope
               │   │ ├─4307 /usr/lib/systemd/systemd --user
               │   │ └─4309 "(sd-pam)"
               │   └─user.slice
               │     └─podman-pause-6e894c72.scope
               │       └─13874 catatonit -P
               └─user-42477.slice
                 ├─session-20.scope
                 │ ├─76562 "sshd-session: ceph-admin [priv]"
                 │ └─76584 "sshd-session: ceph-admin"
                 ├─session-22.scope
                 │ ├─76579 "sshd-session: ceph-admin [priv]"
                 │ └─76585 "sshd-session: ceph-admin@notty"
                 ├─session-23.scope
                 │ ├─76611 "sshd-session: ceph-admin [priv]"
                 │ └─76614 "sshd-session: ceph-admin@notty"
                 ├─session-24.scope
                 │ ├─76640 "sshd-session: ceph-admin [priv]"
                 │ └─76643 "sshd-session: ceph-admin@notty"
                 ├─session-25.scope
                 │ ├─76669 "sshd-session: ceph-admin [priv]"
                 │ └─76672 "sshd-session: ceph-admin@notty"
                 ├─session-26.scope
                 │ ├─76698 "sshd-session: ceph-admin [priv]"
                 │ └─76701 "sshd-session: ceph-admin@notty"
                 ├─session-27.scope
                 │ ├─76727 "sshd-session: ceph-admin [priv]"
                 │ └─76730 "sshd-session: ceph-admin@notty"
                 ├─session-28.scope
                 │ ├─76756 "sshd-session: ceph-admin [priv]"
                 │ └─76759 "sshd-session: ceph-admin@notty"
                 ├─session-29.scope
                 │ ├─76785 "sshd-session: ceph-admin [priv]"
                 │ └─76788 "sshd-session: ceph-admin@notty"
                 ├─session-30.scope
                 │ ├─76814 "sshd-session: ceph-admin [priv]"
                 │ └─76817 "sshd-session: ceph-admin@notty"
                 ├─session-31.scope
                 │ ├─76841 "sshd-session: ceph-admin [priv]"
                 │ └─76844 "sshd-session: ceph-admin@notty"
                 ├─session-32.scope
                 │ ├─76870 "sshd-session: ceph-admin [priv]"
                 │ └─76873 "sshd-session: ceph-admin@notty"
                 └─user@42477.service
                   └─init.scope
                     ├─76566 /usr/lib/systemd/systemd --user
                     └─76568 "(sd-pam)"

Feb 02 18:13:12 compute-0 systemd[1]: Started libcrun container.
Feb 02 18:13:12 compute-0 systemd[1]: libpod-32082b334d08a6d5ab5daf434d029569f670f60ee0b0f99762b8ec4bb1548a07.scope: Deactivated successfully.
Feb 02 18:13:12 compute-0 systemd[1]: var-lib-containers-storage-overlay-e3bb365c1ad1b8e2c8dd8f20c75b00a6fbc7872df31f8b4a252e80a1ccbd1da7-merged.mount: Deactivated successfully.
Feb 02 18:13:12 compute-0 systemd[1]: libpod-conmon-32082b334d08a6d5ab5daf434d029569f670f60ee0b0f99762b8ec4bb1548a07.scope: Deactivated successfully.
Feb 02 18:13:12 compute-0 systemd[1]: Started libpod-conmon-f04134b56be7ae51a2491dd83e89562165b2581d3a93a5029ed331cdcb914e4e.scope.
Feb 02 18:13:12 compute-0 systemd[1]: Started libcrun container.
Feb 02 18:13:13 compute-0 systemd[1]: libpod-f04134b56be7ae51a2491dd83e89562165b2581d3a93a5029ed331cdcb914e4e.scope: Deactivated successfully.
Feb 02 18:13:13 compute-0 systemd[1]: libpod-f04134b56be7ae51a2491dd83e89562165b2581d3a93a5029ed331cdcb914e4e.scope: Consumed 1.199s CPU time.
Feb 02 18:13:13 compute-0 systemd[1]: var-lib-containers-storage-overlay-cad04384d7d78b50320d0896bfe81ac6c3c4299bb9d54a3cc4c94015227deb0d-merged.mount: Deactivated successfully.
Feb 02 18:13:13 compute-0 systemd[1]: libpod-conmon-f04134b56be7ae51a2491dd83e89562165b2581d3a93a5029ed331cdcb914e4e.scope: Deactivated successfully.

● machine.slice - Virtual Machine and Container Slice
     Loaded: loaded (/usr/lib/systemd/system/machine.slice; static)
     Active: active since Mon 2026-02-02 17:20:13 UTC; 53min ago
      Until: Mon 2026-02-02 17:20:13 UTC; 53min ago
       Docs: man:systemd.special(7)
         IO: 975.5M read, 78.4M written
      Tasks: 44
     Memory: 940.4M (peak: 1.7G)
        CPU: 11min 29.405s
     CGroup: /machine.slice
             ├─libpod-17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681.scope
             │ └─container
             │   ├─239848 dumb-init --single-child -- kolla_start
             │   ├─239853 /usr/bin/python3 /usr/bin/nova-compute
             │   ├─246527 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmp70ca1rbh/privsep.sock
             │   └─249256 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmp9qdvacqx/privsep.sock
             ├─libpod-70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783.scope
             │ └─container
             │   ├─146043 dumb-init --single-child -- kolla_start
             │   └─146046 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
             └─libpod-983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b.scope
               └─container
                 ├─155388 dumb-init --single-child -- kolla_start
                 ├─155391 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
                 ├─155696 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
                 ├─155891 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpty0fqf9d/privsep.sock
                 ├─246686 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpx1bw3qnr/privsep.sock
                 └─246720 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmp037046cp/privsep.sock

Feb 02 18:13:11 compute-0 pensive_dijkstra[291747]:                 "ceph.vdo": "0",
Feb 02 18:13:11 compute-0 pensive_dijkstra[291747]:                 "ceph.with_tpm": "0"
Feb 02 18:13:11 compute-0 pensive_dijkstra[291747]:             },
Feb 02 18:13:11 compute-0 pensive_dijkstra[291747]:             "type": "block",
Feb 02 18:13:11 compute-0 pensive_dijkstra[291747]:             "vg_name": "ceph_vg2"
Feb 02 18:13:11 compute-0 pensive_dijkstra[291747]:         }
Feb 02 18:13:11 compute-0 pensive_dijkstra[291747]:     ]
Feb 02 18:13:11 compute-0 pensive_dijkstra[291747]: }
Feb 02 18:13:12 compute-0 pensive_kepler[291933]: 167 167
Feb 02 18:13:13 compute-0 brave_lederberg[292012]: {}

● system-ceph\x2deb48d0ef\x2d3496\x2d563c\x2db73d\x2d661fb962013e.slice - Slice /system/ceph-eb48d0ef-3496-563c-b73d-661fb962013e
     Loaded: loaded
     Active: active since Mon 2026-02-02 17:20:16 UTC; 53min ago
      Until: Mon 2026-02-02 17:20:16 UTC; 53min ago
         IO: 1.6G read, 25.0G written
      Tasks: 1002
     Memory: 3.4G (peak: 4.1G)
        CPU: 4min 20.360s
     CGroup: /system.slice/system-ceph\x2deb48d0ef\x2d3496\x2d563c\x2db73d\x2d661fb962013e.slice
             ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@crash.compute-0.service
             │ ├─libpod-payload-43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b
             │ │ ├─80152 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ └─80154 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ └─runtime
             │   └─80150 /usr/bin/conmon --api-version 1 -c 43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b -u 43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b/userdata -p /run/containers/storage/overlay-containers/43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b
             ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mds.cephfs.compute-0.vvdoei.service
             │ ├─libpod-payload-30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56
             │ │ ├─95612 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.vvdoei -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─95614 /usr/bin/ceph-mds -n mds.cephfs.compute-0.vvdoei -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─95610 /usr/bin/conmon --api-version 1 -c 30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56 -u 30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56/userdata -p /run/containers/storage/overlay-containers/30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-mds-cephfs-compute-0-vvdoei --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mds.cephfs.compute-0.vvdoei.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56
             ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mgr.compute-0.hccdnu.service
             │ ├─libpod-payload-f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c
             │ │ ├─75491 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.hccdnu -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─75493 /usr/bin/ceph-mgr -n mgr.compute-0.hccdnu -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─75489 /usr/bin/conmon --api-version 1 -c f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c -u f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c/userdata -p /run/containers/storage/overlay-containers/f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-mgr-compute-0-hccdnu --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mgr.compute-0.hccdnu.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c
             ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mon.compute-0.service
             │ ├─libpod-payload-49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26
             │ │ ├─75195 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─75197 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─runtime
             │   └─75193 /usr/bin/conmon --api-version 1 -c 49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26 -u 49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26/userdata -p /run/containers/storage/overlay-containers/49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26
             ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.0.service
             │ ├─libpod-payload-5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f
             │ │ ├─85694 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ └─85696 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ └─runtime
             │   └─85692 /usr/bin/conmon --api-version 1 -c 5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f -u 5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f/userdata -p /run/containers/storage/overlay-containers/5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f
             ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.1.service
             │ ├─libpod-payload-849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42
             │ │ ├─86735 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ └─86737 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ └─runtime
             │   └─86733 /usr/bin/conmon --api-version 1 -c 849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42 -u 849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42/userdata -p /run/containers/storage/overlay-containers/849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42
             ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.2.service
             │ ├─libpod-payload-49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071
             │ │ ├─87790 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ └─87792 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ └─runtime
             │   └─87788 /usr/bin/conmon --api-version 1 -c 49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071 -u 49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071/userdata -p /run/containers/storage/overlay-containers/49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071
             └─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@rgw.rgw.compute-0.molmny.service
               ├─libpod-payload-31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e
               │ ├─95127 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.molmny -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
               │ └─95129 /usr/bin/radosgw -n client.rgw.rgw.compute-0.molmny -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
               └─runtime
                 └─95125 /usr/bin/conmon --api-version 1 -c 31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e -u 31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e/userdata -p /run/containers/storage/overlay-containers/31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-rgw-rgw-compute-0-molmny --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@rgw.rgw.compute-0.molmny.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e

Feb 02 18:13:22 compute-0 ceph-mon[75197]: from='client.? 192.168.122.100:0/1767462686' entity='client.admin' cmd={"prefix": "fs dump", "format": "json-pretty"} : dispatch
Feb 02 18:13:22 compute-0 ceph-mon[75197]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "fs ls", "format": "json-pretty"} v 0)
Feb 02 18:13:22 compute-0 ceph-mon[75197]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2436103772' entity='client.admin' cmd={"prefix": "fs ls", "format": "json-pretty"} : dispatch
Feb 02 18:13:23 compute-0 ceph-mgr[75493]: log_channel(audit) log [DBG] : from='client.19618 -' entity='client.admin' cmd=[{"prefix": "fs status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Feb 02 18:13:23 compute-0 ceph-mon[75197]: pgmap v2046: 305 pgs: 305 active+clean; 271 MiB data, 632 MiB used, 59 GiB / 60 GiB avail
Feb 02 18:13:23 compute-0 ceph-mon[75197]: from='client.? 192.168.122.100:0/2436103772' entity='client.admin' cmd={"prefix": "fs ls", "format": "json-pretty"} : dispatch
Feb 02 18:13:23 compute-0 ceph-mon[75197]: from='client.19618 -' entity='client.admin' cmd=[{"prefix": "fs status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Feb 02 18:13:23 compute-0 ceph-mon[75197]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mds stat", "format": "json-pretty"} v 0)
Feb 02 18:13:23 compute-0 ceph-mon[75197]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2548876183' entity='client.admin' cmd={"prefix": "mds stat", "format": "json-pretty"} : dispatch
Feb 02 18:13:24 compute-0 ceph-mgr[75493]: log_channel(cluster) log [DBG] : pgmap v2047: 305 pgs: 305 active+clean; 271 MiB data, 632 MiB used, 59 GiB / 60 GiB avail

● system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice - Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged
     Loaded: loaded
     Active: active since Mon 2026-02-02 17:34:23 UTC; 39min ago
      Until: Mon 2026-02-02 17:34:23 UTC; 39min ago
         IO: 4.0K read, 0B written
      Tasks: 0
     Memory: 4.0K (peak: 58.8M)
        CPU: 915ms
     CGroup: /system.slice/system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice

Feb 02 17:34:23 compute-0 systemd[1]: Created slice Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged.

● system-getty.slice - Slice /system/getty
     Loaded: loaded
     Active: active since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 232.0K (peak: 452.0K)
        CPU: 8ms
     CGroup: /system.slice/system-getty.slice
             └─getty@tty1.service
               └─1013 /sbin/agetty -o "-p -- \\u" --noclear - linux

● system-modprobe.slice - Slice /system/modprobe
     Loaded: loaded
     Active: active since Mon 2026-02-02 16:46:00 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:00 UTC; 1h 27min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 52.0K (peak: 11.7M)
        CPU: 152ms
     CGroup: /system.slice/system-modprobe.slice

Feb 02 16:46:00 localhost systemd[1]: Created slice Slice /system/modprobe.

● system-serial\x2dgetty.slice - Slice /system/serial-getty
     Loaded: loaded
     Active: active since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 320.0K (peak: 564.0K)
        CPU: 9ms
     CGroup: /system.slice/system-serial\x2dgetty.slice
             └─serial-getty@ttyS0.service
               └─1014 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

● system-sshd\x2dkeygen.slice - Slice /system/sshd-keygen
     Loaded: loaded
     Active: active since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 0B (peak: 0B)
        CPU: 0
     CGroup: /system.slice/system-sshd\x2dkeygen.slice

● system.slice - System Slice
     Loaded: loaded
     Active: active since Mon 2026-02-02 16:45:59 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:45:59 UTC; 1h 27min ago
       Docs: man:systemd.special(7)
         IO: 1.9G read, 25.2G written
      Tasks: 1133
     Memory: 4.1G (peak: 4.7G)
        CPU: 8min 52.118s
     CGroup: /system.slice
             ├─NetworkManager.service
             │ └─49022 /usr/sbin/NetworkManager --no-daemon
             ├─auditd.service
             │ ├─704 /sbin/auditd
             │ └─706 /usr/sbin/sedispatch
             ├─chronyd.service
             │ └─58579 /usr/sbin/chronyd -F 2
             ├─crond.service
             │ ├─ 1010 /usr/sbin/crond -n
             │ └─29996 /usr/sbin/anacron -s
             ├─dbus-broker.service
             │ ├─770 /usr/bin/dbus-broker-launch --scope system --audit
             │ └─777 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             ├─edpm_nova_compute.service
             │ └─239846 /usr/bin/conmon --api-version 1 -c 17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681 -u 17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681/userdata -p /run/containers/storage/overlay-containers/17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 17e8fd462ba5e72ae3430209748fcdbe5242ab00cbe0d2070f3bc042ab5b9681
             ├─edpm_ovn_controller.service
             │ └─146041 /usr/bin/conmon --api-version 1 -c 70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783 -u 70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783/userdata -p /run/containers/storage/overlay-containers/70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783
             ├─edpm_ovn_metadata_agent.service
             │ └─155386 /usr/bin/conmon --api-version 1 -c 983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b -u 983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b/userdata -p /run/containers/storage/overlay-containers/983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b
             ├─gssproxy.service
             │ └─873 /usr/sbin/gssproxy -D
             ├─irqbalance.service
             │ └─795 /usr/sbin/irqbalance
             ├─iscsid.service
             │ └─224391 /usr/sbin/iscsid -f
             ├─multipathd.service
             │ └─224550 /sbin/multipathd -d -s
             ├─ovs-vswitchd.service
             │ └─47317 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             ├─ovsdb-server.service
             │ └─47236 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             ├─polkit.service
             │ └─43458 /usr/lib/polkit-1/polkitd --no-debug
             ├─rpcbind.service
             │ └─702 /usr/bin/rpcbind -w -f
             ├─rsyslog.service
             │ └─1006 /usr/sbin/rsyslogd -n
             ├─sshd.service
             │ └─182591 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             ├─system-ceph\x2deb48d0ef\x2d3496\x2d563c\x2db73d\x2d661fb962013e.slice
             │ ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@crash.compute-0.service
             │ │ ├─libpod-payload-43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b
             │ │ │ ├─80152 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ └─80154 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ └─runtime
             │ │   └─80150 /usr/bin/conmon --api-version 1 -c 43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b -u 43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b/userdata -p /run/containers/storage/overlay-containers/43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 43c95f4962571d6ee3a5291e1f020e4230f3e56f098995891d143922a65cac4b
             │ ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mds.cephfs.compute-0.vvdoei.service
             │ │ ├─libpod-payload-30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56
             │ │ │ ├─95612 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.vvdoei -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─95614 /usr/bin/ceph-mds -n mds.cephfs.compute-0.vvdoei -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─95610 /usr/bin/conmon --api-version 1 -c 30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56 -u 30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56/userdata -p /run/containers/storage/overlay-containers/30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-mds-cephfs-compute-0-vvdoei --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mds.cephfs.compute-0.vvdoei.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 30ea8cb4e62f17278ebefb0cae478bc9b6467ba0762d054bfe95a0976430da56
             │ ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mgr.compute-0.hccdnu.service
             │ │ ├─libpod-payload-f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c
             │ │ │ ├─75491 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.hccdnu -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─75493 /usr/bin/ceph-mgr -n mgr.compute-0.hccdnu -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─75489 /usr/bin/conmon --api-version 1 -c f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c -u f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c/userdata -p /run/containers/storage/overlay-containers/f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-mgr-compute-0-hccdnu --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mgr.compute-0.hccdnu.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg f51dea2484a885b1ef464f71470d5eb130f74e7dd5065bf2b6342dc1451e6a4c
             │ ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mon.compute-0.service
             │ │ ├─libpod-payload-49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26
             │ │ │ ├─75195 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─75197 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─runtime
             │ │   └─75193 /usr/bin/conmon --api-version 1 -c 49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26 -u 49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26/userdata -p /run/containers/storage/overlay-containers/49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 49cf601899989c75e64c17f569867f9dc2bb2a6ce1f21706d993305a3a0b4d26
             │ ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.0.service
             │ │ ├─libpod-payload-5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f
             │ │ │ ├─85694 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ └─85696 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ └─runtime
             │ │   └─85692 /usr/bin/conmon --api-version 1 -c 5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f -u 5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f/userdata -p /run/containers/storage/overlay-containers/5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5ec5d30977a06416493ca65ffb2bc1efb368bd71870a20ffcf4a99fc8c30655f
             │ ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.1.service
             │ │ ├─libpod-payload-849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42
             │ │ │ ├─86735 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ └─86737 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ └─runtime
             │ │   └─86733 /usr/bin/conmon --api-version 1 -c 849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42 -u 849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42/userdata -p /run/containers/storage/overlay-containers/849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 849770b4bec6f7bda5e71b5d2b63467f261481956bf4b714be6269cbddd8aa42
             │ ├─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.2.service
             │ │ ├─libpod-payload-49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071
             │ │ │ ├─87790 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ └─87792 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ └─runtime
             │ │   └─87788 /usr/bin/conmon --api-version 1 -c 49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071 -u 49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071/userdata -p /run/containers/storage/overlay-containers/49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 49ee9de1004adc227a28add932102ae6c092e83b89c3463cc66529c1092a3071
             │ └─ceph-eb48d0ef-3496-563c-b73d-661fb962013e@rgw.rgw.compute-0.molmny.service
             │   ├─libpod-payload-31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e
             │   │ ├─95127 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.molmny -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
             │   │ └─95129 /usr/bin/radosgw -n client.rgw.rgw.compute-0.molmny -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
             │   └─runtime
             │     └─95125 /usr/bin/conmon --api-version 1 -c 31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e -u 31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e/userdata -p /run/containers/storage/overlay-containers/31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e/userdata/pidfile -n ceph-eb48d0ef-3496-563c-b73d-661fb962013e-rgw-rgw-compute-0-molmny --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e/userdata/oci-log --conmon-pidfile /run/ceph-eb48d0ef-3496-563c-b73d-661fb962013e@rgw.rgw.compute-0.molmny.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 31c23500b425f65c54ced9f2de36b162413b6fa358f90c128fa457114a0a693e
             ├─system-getty.slice
             │ └─getty@tty1.service
             │   └─1013 /sbin/agetty -o "-p -- \\u" --noclear - linux
             ├─system-serial\x2dgetty.slice
             │ └─serial-getty@ttyS0.service
             │   └─1014 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             ├─systemd-hostnamed.service
             │ └─290803 /usr/lib/systemd/systemd-hostnamed
             ├─systemd-journald.service
             │ └─679 /usr/lib/systemd/systemd-journald
             ├─systemd-logind.service
             │ └─799 /usr/lib/systemd/systemd-logind
             ├─systemd-machined.service
             │ └─208080 /usr/lib/systemd/systemd-machined
             ├─systemd-udevd.service
             │ └─udev
             │   └─732 /usr/lib/systemd/systemd-udevd
             ├─tuned.service
             │ └─106772 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             ├─virtlogd.service
             │ └─207424 /usr/sbin/virtlogd
             ├─virtnodedevd.service
             │ └─240340 /usr/sbin/virtnodedevd --timeout 120
             ├─virtqemud.service
             │ └─239321 /usr/sbin/virtqemud --timeout 120
             └─virtsecretd.service
               └─246591 /usr/sbin/virtsecretd --timeout 120

Feb 02 18:13:13 compute-0 lvm[292222]: PV /dev/loop5 online, VG ceph_vg2 is complete.
Feb 02 18:13:13 compute-0 lvm[292222]: VG ceph_vg2 finished
Feb 02 18:13:14 compute-0 nova_compute[239846]: 2026-02-02 18:13:14.278 239853 DEBUG oslo_service.periodic_task [None req-2829bb05-3ca0-460a-873d-6129b7c9c50b - - - - - -] Running periodic task ComputeManager._run_pending_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Feb 02 18:13:14 compute-0 nova_compute[239846]: 2026-02-02 18:13:14.279 239853 DEBUG nova.compute.manager [None req-2829bb05-3ca0-460a-873d-6129b7c9c50b - - - - - -] Cleaning up deleted instances _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11145[00m
Feb 02 18:13:14 compute-0 nova_compute[239846]: 2026-02-02 18:13:14.305 239853 DEBUG nova.compute.manager [None req-2829bb05-3ca0-460a-873d-6129b7c9c50b - - - - - -] There are 0 instances to clean _run_pending_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:11154[00m
Feb 02 18:13:16 compute-0 nova_compute[239846]: 2026-02-02 18:13:16.048 239853 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 39 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 18:13:16 compute-0 nova_compute[239846]: 2026-02-02 18:13:16.050 239853 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 39 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 18:13:20 compute-0 podman[294136]: 2026-02-02 18:13:20.611048372 +0000 UTC m=+0.057621445 container health_status 983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.build-date=20260127, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': 'fb44d116753823076754339ecdff5d26c5c02250617a2157b9bf22160a92362b-b679b18dd4e53db9e352e8eb6b265beb4b106035d3e3bfb3cb99fdf41954fcec-b679b18dd4e53db9e352e8eb6b265beb4b106035d3e3bfb3cb99fdf41954fcec-b679b18dd4e53db9e352e8eb6b265beb4b106035d3e3bfb3cb99fdf41954fcec-0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/openstack/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', 
'/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image)
Feb 02 18:13:21 compute-0 nova_compute[239846]: 2026-02-02 18:13:21.050 239853 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 39 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 18:13:22 compute-0 podman[294309]: 2026-02-02 18:13:22.893892451 +0000 UTC m=+0.066878965 container health_status 70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, tcib_managed=true, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': 'fb44d116753823076754339ecdff5d26c5c02250617a2157b9bf22160a92362b-b679b18dd4e53db9e352e8eb6b265beb4b106035d3e3bfb3cb99fdf41954fcec-b679b18dd4e53db9e352e8eb6b265beb4b106035d3e3bfb3cb99fdf41954fcec-b679b18dd4e53db9e352e8eb6b265beb4b106035d3e3bfb3cb99fdf41954fcec'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.vendor=CentOS, config_id=ovn_controller, maintainer=OpenStack Kubernetes Operator team, 
managed_by=edpm_ansible, org.label-schema.build-date=20260127)

● user-1000.slice - User Slice of UID 1000
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Mon 2026-02-02 16:46:28 UTC; 1h 26min ago
      Until: Mon 2026-02-02 16:46:28 UTC; 1h 26min ago
       Docs: man:user@.service(5)
         IO: 661.1M read, 8.6G written
      Tasks: 35 (limit: 20031)
     Memory: 1.4G (peak: 4.1G)
        CPU: 21min 33.616s
     CGroup: /user.slice/user-1000.slice
             ├─session-1.scope
             │ └─4520 /usr/bin/python3
             ├─session-54.scope
             │ ├─287794 "sshd-session: zuul [priv]"
             │ ├─287797 "sshd-session: zuul@notty"
             │ ├─287798 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ ├─287822 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ ├─294026 timeout 15s turbostat --debug sleep 10
             │ ├─294374 timeout 300s systemctl status --all
             │ ├─294375 systemctl status --all
             │ ├─294403 timeout 300s ceph mon dump --format json-pretty
             │ ├─294404 /usr/bin/python3 -s /usr/bin/ceph mon dump --format json-pretty
             │ └─294430 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             └─user@1000.service
               ├─app.slice
               │ └─dbus-broker.service
               │   ├─13900 /usr/bin/dbus-broker-launch --scope user
               │   └─13910 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               ├─init.scope
               │ ├─4307 /usr/lib/systemd/systemd --user
               │ └─4309 "(sd-pam)"
               └─user.slice
                 └─podman-pause-6e894c72.scope
                   └─13874 catatonit -P

Feb 02 18:04:24 compute-0 sudo[281435]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/rm -rf /var/tmp/sos-osp
Feb 02 18:04:24 compute-0 sudo[281435]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Feb 02 18:04:24 compute-0 sudo[281435]: pam_unix(sudo:session): session closed for user root
Feb 02 18:04:24 compute-0 sshd-session[281434]: Received disconnect from 192.168.122.10 port 40758:11: disconnected by user
Feb 02 18:04:24 compute-0 sshd-session[281434]: Disconnected from user zuul 192.168.122.10 port 40758
Feb 02 18:12:40 compute-0 sudo[287798]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Feb 02 18:12:40 compute-0 sudo[287798]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Feb 02 18:13:16 compute-0 ovs-appctl[293162]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Feb 02 18:13:16 compute-0 ovs-appctl[293166]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Feb 02 18:13:16 compute-0 ovs-appctl[293171]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

● user-42477.slice - User Slice of UID 42477
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Mon 2026-02-02 17:20:42 UTC; 52min ago
      Until: Mon 2026-02-02 17:20:42 UTC; 52min ago
       Docs: man:user@.service(5)
         IO: 180.0K read, 191.4M written
      Tasks: 26 (limit: 20031)
     Memory: 28.8M (peak: 78.7M)
        CPU: 3min 6.194s
     CGroup: /user.slice/user-42477.slice
             ├─session-20.scope
             │ ├─76562 "sshd-session: ceph-admin [priv]"
             │ └─76584 "sshd-session: ceph-admin"
             ├─session-22.scope
             │ ├─76579 "sshd-session: ceph-admin [priv]"
             │ └─76585 "sshd-session: ceph-admin@notty"
             ├─session-23.scope
             │ ├─76611 "sshd-session: ceph-admin [priv]"
             │ └─76614 "sshd-session: ceph-admin@notty"
             ├─session-24.scope
             │ ├─76640 "sshd-session: ceph-admin [priv]"
             │ └─76643 "sshd-session: ceph-admin@notty"
             ├─session-25.scope
             │ ├─76669 "sshd-session: ceph-admin [priv]"
             │ └─76672 "sshd-session: ceph-admin@notty"
             ├─session-26.scope
             │ ├─76698 "sshd-session: ceph-admin [priv]"
             │ └─76701 "sshd-session: ceph-admin@notty"
             ├─session-27.scope
             │ ├─76727 "sshd-session: ceph-admin [priv]"
             │ └─76730 "sshd-session: ceph-admin@notty"
             ├─session-28.scope
             │ ├─76756 "sshd-session: ceph-admin [priv]"
             │ └─76759 "sshd-session: ceph-admin@notty"
             ├─session-29.scope
             │ ├─76785 "sshd-session: ceph-admin [priv]"
             │ └─76788 "sshd-session: ceph-admin@notty"
             ├─session-30.scope
             │ ├─76814 "sshd-session: ceph-admin [priv]"
             │ └─76817 "sshd-session: ceph-admin@notty"
             ├─session-31.scope
             │ ├─76841 "sshd-session: ceph-admin [priv]"
             │ └─76844 "sshd-session: ceph-admin@notty"
             ├─session-32.scope
             │ ├─76870 "sshd-session: ceph-admin [priv]"
             │ └─76873 "sshd-session: ceph-admin@notty"
             └─user@42477.service
               └─init.scope
                 ├─76566 /usr/lib/systemd/systemd --user
                 └─76568 "(sd-pam)"

Feb 02 18:13:12 compute-0 podman[291988]: 2026-02-02 18:13:12.539263483 +0000 UTC m=+0.025413827 image pull 524f3da276461682bec27427fb8a63b5139c40ad4185939aede197474a6817b3 quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86
Feb 02 18:13:12 compute-0 podman[291988]: 2026-02-02 18:13:12.650569939 +0000 UTC m=+0.136720283 container init f04134b56be7ae51a2491dd83e89562165b2581d3a93a5029ed331cdcb914e4e (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=brave_lederberg, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=tentacle, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, ceph=True, org.label-schema.build-date=20251030, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9)
Feb 02 18:13:12 compute-0 podman[291988]: 2026-02-02 18:13:12.657800372 +0000 UTC m=+0.143950686 container start f04134b56be7ae51a2491dd83e89562165b2581d3a93a5029ed331cdcb914e4e (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=brave_lederberg, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=tentacle, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, io.buildah.version=1.41.3, org.label-schema.build-date=20251030, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Feb 02 18:13:12 compute-0 podman[291988]: 2026-02-02 18:13:12.662128484 +0000 UTC m=+0.148278818 container attach f04134b56be7ae51a2491dd83e89562165b2581d3a93a5029ed331cdcb914e4e (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=brave_lederberg, CEPH_REF=tentacle, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20251030, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.41.3)
Feb 02 18:13:13 compute-0 podman[291988]: 2026-02-02 18:13:13.469816948 +0000 UTC m=+0.955967282 container died f04134b56be7ae51a2491dd83e89562165b2581d3a93a5029ed331cdcb914e4e (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=brave_lederberg, CEPH_REF=tentacle, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20251030, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Feb 02 18:13:13 compute-0 podman[291988]: 2026-02-02 18:13:13.531255448 +0000 UTC m=+1.017405772 container remove f04134b56be7ae51a2491dd83e89562165b2581d3a93a5029ed331cdcb914e4e (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=brave_lederberg, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, io.buildah.version=1.41.3, ceph=True, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=tentacle, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20251030, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/)
Feb 02 18:13:13 compute-0 sudo[291861]: pam_unix(sudo:session): session closed for user root
Feb 02 18:13:13 compute-0 sudo[292316]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Feb 02 18:13:13 compute-0 sudo[292316]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 18:13:13 compute-0 sudo[292316]: pam_unix(sudo:session): session closed for user root

● user.slice - User and Session Slice
     Loaded: loaded (/usr/lib/systemd/system/user.slice; static)
     Active: active since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
       Docs: man:systemd.special(7)
         IO: 661.3M read, 8.7G written
      Tasks: 63
     Memory: 1.5G (peak: 4.2G)
        CPU: 24min 40.301s
     CGroup: /user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4520 /usr/bin/python3
             │ ├─session-54.scope
             │ │ ├─287794 "sshd-session: zuul [priv]"
             │ │ ├─287797 "sshd-session: zuul@notty"
             │ │ ├─287798 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─287822 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─294026 timeout 15s turbostat --debug sleep 10
             │ │ ├─294374 timeout 300s systemctl status --all
             │ │ ├─294375 systemctl status --all
             │ │ ├─294403 timeout 300s ceph mon dump --format json-pretty
             │ │ ├─294404 /usr/bin/python3 -s /usr/bin/ceph mon dump --format json-pretty
             │ │ ├─294430 timeout 300s tpm2_getcap properties-variable
             │ │ └─294431 tpm2_getcap properties-variable
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─13900 /usr/bin/dbus-broker-launch --scope user
             │   │   └─13910 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4307 /usr/lib/systemd/systemd --user
             │   │ └─4309 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-6e894c72.scope
             │       └─13874 catatonit -P
             └─user-42477.slice
               ├─session-20.scope
               │ ├─76562 "sshd-session: ceph-admin [priv]"
               │ └─76584 "sshd-session: ceph-admin"
               ├─session-22.scope
               │ ├─76579 "sshd-session: ceph-admin [priv]"
               │ └─76585 "sshd-session: ceph-admin@notty"
               ├─session-23.scope
               │ ├─76611 "sshd-session: ceph-admin [priv]"
               │ └─76614 "sshd-session: ceph-admin@notty"
               ├─session-24.scope
               │ ├─76640 "sshd-session: ceph-admin [priv]"
               │ └─76643 "sshd-session: ceph-admin@notty"
               ├─session-25.scope
               │ ├─76669 "sshd-session: ceph-admin [priv]"
               │ └─76672 "sshd-session: ceph-admin@notty"
               ├─session-26.scope
               │ ├─76698 "sshd-session: ceph-admin [priv]"
               │ └─76701 "sshd-session: ceph-admin@notty"
               ├─session-27.scope
               │ ├─76727 "sshd-session: ceph-admin [priv]"
               │ └─76730 "sshd-session: ceph-admin@notty"
               ├─session-28.scope
               │ ├─76756 "sshd-session: ceph-admin [priv]"
               │ └─76759 "sshd-session: ceph-admin@notty"
               ├─session-29.scope
               │ ├─76785 "sshd-session: ceph-admin [priv]"
               │ └─76788 "sshd-session: ceph-admin@notty"
               ├─session-30.scope
               │ ├─76814 "sshd-session: ceph-admin [priv]"
               │ └─76817 "sshd-session: ceph-admin@notty"
               ├─session-31.scope
               │ ├─76841 "sshd-session: ceph-admin [priv]"
               │ └─76844 "sshd-session: ceph-admin@notty"
               ├─session-32.scope
               │ ├─76870 "sshd-session: ceph-admin [priv]"
               │ └─76873 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─76566 /usr/lib/systemd/systemd --user
                   └─76568 "(sd-pam)"

● dbus.socket - D-Bus System Message Bus Socket
     Loaded: loaded (/usr/lib/systemd/system/dbus.socket; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
   Triggers: ● dbus-broker.service
     Listen: /run/dbus/system_bus_socket (Stream)
     CGroup: /system.slice/dbus.socket

Feb 02 16:46:03 localhost systemd[1]: Listening on D-Bus System Message Bus Socket.

● dm-event.socket - Device-mapper event daemon FIFOs
     Loaded: loaded (/usr/lib/systemd/system/dm-event.socket; enabled; preset: enabled)
     Active: active (listening) since Mon 2026-02-02 17:14:16 UTC; 59min ago
      Until: Mon 2026-02-02 17:14:16 UTC; 59min ago
   Triggers: ● dm-event.service
       Docs: man:dmeventd(8)
     Listen: /run/dmeventd-server (FIFO)
             /run/dmeventd-client (FIFO)
     CGroup: /system.slice/dm-event.socket

Feb 02 17:14:16 compute-0 systemd[1]: Listening on Device-mapper event daemon FIFOs.

● iscsid.socket - Open-iSCSI iscsid Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsid.socket; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 17:35:22 UTC; 38min ago
      Until: Mon 2026-02-02 17:35:22 UTC; 38min ago
   Triggers: ● iscsid.service
       Docs: man:iscsid(8)
             man:iscsiadm(8)
     Listen: @ISCSIADM_ABSTRACT_NAMESPACE (Stream)
     CGroup: /system.slice/iscsid.socket

Feb 02 17:35:22 compute-0 systemd[1]: Listening on Open-iSCSI iscsid Socket.

○ iscsiuio.socket - Open-iSCSI iscsiuio Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● iscsiuio.service
       Docs: man:iscsiuio(8)
     Listen: @ISCSID_UIP_ABSTRACT_NAMESPACE (Stream)

○ libvirtd-admin.socket - libvirt legacy monolithic daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-admin.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)

○ libvirtd-ro.socket - libvirt legacy monolithic daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-ro.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)

○ libvirtd-tls.socket
     Loaded: masked (Reason: Unit libvirtd-tls.socket is masked.)
     Active: inactive (dead)

○ libvirtd.socket - libvirt legacy monolithic daemon socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock (Stream)

● lvm2-lvmpolld.socket - LVM2 poll daemon socket
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.socket; enabled; preset: enabled)
     Active: active (listening) since Mon 2026-02-02 17:14:16 UTC; 59min ago
      Until: Mon 2026-02-02 17:14:16 UTC; 59min ago
   Triggers: ● lvm2-lvmpolld.service
       Docs: man:lvmpolld(8)
     Listen: /run/lvm/lvmpolld.socket (Stream)
     CGroup: /system.slice/lvm2-lvmpolld.socket

Feb 02 17:14:16 compute-0 systemd[1]: Listening on LVM2 poll daemon socket.

● multipathd.socket - multipathd control socket
     Loaded: loaded (/usr/lib/systemd/system/multipathd.socket; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 17:35:45 UTC; 37min ago
      Until: Mon 2026-02-02 17:35:45 UTC; 37min ago
   Triggers: ● multipathd.service
     Listen: @/org/kernel/linux/storage/multipathd (Stream)
             /run/multipathd.socket (Stream)
     CGroup: /system.slice/multipathd.socket

Feb 02 17:35:45 compute-0 systemd[1]: Listening on multipathd control socket.

● rpcbind.socket - RPCbind Server Activation Socket
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.socket; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
   Triggers: ● rpcbind.service
     Listen: /run/rpcbind.sock (Stream)
             0.0.0.0:111 (Stream)
             0.0.0.0:111 (Datagram)
             [::]:111 (Stream)
             [::]:111 (Datagram)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 16.0K (peak: 288.0K)
        CPU: 4ms
     CGroup: /system.slice/rpcbind.socket

● sssd-kcm.socket - SSSD Kerberos Cache Manager responder socket
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.socket; enabled; preset: enabled)
     Active: active (listening) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
   Triggers: ● sssd-kcm.service
       Docs: man:sssd-kcm(8)
     Listen: /run/.heim_org.h5l.kcm-socket (Stream)
     CGroup: /system.slice/sssd-kcm.socket

Feb 02 16:46:03 localhost systemd[1]: Listening on SSSD Kerberos Cache Manager responder socket.

○ syslog.socket - Syslog Socket
     Loaded: loaded (/usr/lib/systemd/system/syslog.socket; static)
     Active: inactive (dead)
   Triggers: ● syslog.service
       Docs: man:systemd.special(7)
             https://www.freedesktop.org/wiki/Software/systemd/syslog
     Listen: /run/systemd/journal/syslog (Datagram)

● systemd-coredump.socket - Process Core Dump Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-coredump.socket; static)
     Active: active (listening) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
       Docs: man:systemd-coredump(8)
     Listen: /run/systemd/coredump (SequentialPacket)
   Accepted: 0; Connected: 0;
     CGroup: /system.slice/systemd-coredump.socket

● systemd-initctl.socket - initctl Compatibility Named Pipe
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.socket; static)
     Active: active (listening) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
   Triggers: ● systemd-initctl.service
       Docs: man:systemd-initctl.socket(8)
     Listen: /run/initctl (FIFO)
     CGroup: /system.slice/systemd-initctl.socket

● systemd-journald-dev-log.socket - Journal Socket (/dev/log)
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald-dev-log.socket; static)
     Active: active (running) since Mon 2026-02-02 16:45:59 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:45:59 UTC; 1h 27min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/dev-log (Datagram)
     CGroup: /system.slice/systemd-journald-dev-log.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-journald.socket - Journal Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.socket; static)
     Active: active (running) since Mon 2026-02-02 16:45:59 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:45:59 UTC; 1h 27min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/socket (Datagram)
             /run/systemd/journal/stdout (Stream)
     CGroup: /system.slice/systemd-journald.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-rfkill.socket - Load/Save RF Kill Switch Status /dev/rfkill Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.socket; static)
     Active: active (listening) since Mon 2026-02-02 16:46:05 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:05 UTC; 1h 27min ago
   Triggers: ● systemd-rfkill.service
       Docs: man:systemd-rfkill.socket(8)
     Listen: /dev/rfkill (Special)
     CGroup: /system.slice/systemd-rfkill.socket

Feb 02 16:46:05 np0005605476.novalocal systemd[1]: Listening on Load/Save RF Kill Switch Status /dev/rfkill Watch.

● systemd-udevd-control.socket - udev Control Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-control.socket; static)
     Active: active (running) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-control.socket(8)
             man:udev(7)
     Listen: /run/udev/control (SequentialPacket)
     CGroup: /system.slice/systemd-udevd-control.socket

● systemd-udevd-kernel.socket - udev Kernel Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-kernel.socket; static)
     Active: active (running) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-kernel.socket(8)
             man:udev(7)
     Listen: kobject-uevent 1 (Netlink)
     CGroup: /system.slice/systemd-udevd-kernel.socket

○ virtinterfaced-admin.socket - libvirt interface daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-admin-sock (Stream)

○ virtinterfaced-ro.socket - libvirt interface daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock-ro (Stream)

○ virtinterfaced.socket - libvirt interface daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock (Stream)

○ virtlockd-admin.socket - libvirt locking daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-admin-sock (Stream)

● virtlockd.socket - libvirt locking daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.socket; enabled; preset: enabled)
     Active: active (listening) since Mon 2026-02-02 17:34:24 UTC; 38min ago
      Until: Mon 2026-02-02 17:34:24 UTC; 38min ago
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-sock (Stream)
     CGroup: /system.slice/virtlockd.socket

Feb 02 17:34:24 compute-0 systemd[1]: Listening on libvirt locking daemon socket.

● virtlogd-admin.socket - libvirt logging daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd-admin.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 17:34:21 UTC; 39min ago
      Until: Mon 2026-02-02 17:34:21 UTC; 39min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtlogd-admin.socket

Feb 02 17:34:21 compute-0 systemd[1]: Starting libvirt logging daemon admin socket...
Feb 02 17:34:21 compute-0 systemd[1]: Listening on libvirt logging daemon admin socket.

● virtlogd.socket - libvirt logging daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 17:34:21 UTC; 39min ago
      Until: Mon 2026-02-02 17:34:21 UTC; 39min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtlogd.socket

Feb 02 17:34:21 compute-0 systemd[1]: Starting libvirt logging daemon socket...
Feb 02 17:34:21 compute-0 systemd[1]: Listening on libvirt logging daemon socket.

○ virtnetworkd-admin.socket - libvirt network daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-admin-sock (Stream)

○ virtnetworkd-ro.socket - libvirt network daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock-ro (Stream)

○ virtnetworkd.socket - libvirt network daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock (Stream)

● virtnodedevd-admin.socket - libvirt nodedev daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-admin.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 17:34:22 UTC; 39min ago
      Until: Mon 2026-02-02 17:34:22 UTC; 39min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtnodedevd-admin.socket

Feb 02 17:34:22 compute-0 systemd[1]: Starting libvirt nodedev daemon admin socket...
Feb 02 17:34:22 compute-0 systemd[1]: Listening on libvirt nodedev daemon admin socket.

● virtnodedevd-ro.socket - libvirt nodedev daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-ro.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 17:34:22 UTC; 39min ago
      Until: Mon 2026-02-02 17:34:22 UTC; 39min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtnodedevd-ro.socket

Feb 02 17:34:22 compute-0 systemd[1]: Starting libvirt nodedev daemon read-only socket...
Feb 02 17:34:22 compute-0 systemd[1]: Listening on libvirt nodedev daemon read-only socket.

● virtnodedevd.socket - libvirt nodedev daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 17:34:22 UTC; 39min ago
      Until: Mon 2026-02-02 17:34:22 UTC; 39min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 4ms
     CGroup: /system.slice/virtnodedevd.socket

Feb 02 17:34:22 compute-0 systemd[1]: Starting libvirt nodedev daemon socket...
Feb 02 17:34:22 compute-0 systemd[1]: Listening on libvirt nodedev daemon socket.

○ virtnwfilterd-admin.socket - libvirt nwfilter daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-admin-sock (Stream)

○ virtnwfilterd-ro.socket - libvirt nwfilter daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock-ro (Stream)

○ virtnwfilterd.socket - libvirt nwfilter daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock (Stream)

● virtproxyd-admin.socket - libvirt proxy daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-admin.socket.d
             └─override.conf
     Active: active (listening) since Mon 2026-02-02 17:34:23 UTC; 39min ago
      Until: Mon 2026-02-02 17:34:23 UTC; 39min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtproxyd-admin.socket

Feb 02 17:34:23 compute-0 systemd[1]: Starting libvirt proxy daemon admin socket...
Feb 02 17:34:23 compute-0 systemd[1]: Listening on libvirt proxy daemon admin socket.

● virtproxyd-ro.socket - libvirt proxy daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-ro.socket.d
             └─override.conf
     Active: active (listening) since Mon 2026-02-02 17:34:23 UTC; 39min ago
      Until: Mon 2026-02-02 17:34:23 UTC; 39min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtproxyd-ro.socket

Feb 02 17:34:23 compute-0 systemd[1]: Starting libvirt proxy daemon read-only socket...
Feb 02 17:34:23 compute-0 systemd[1]: Listening on libvirt proxy daemon read-only socket.

● virtproxyd-tls.socket - libvirt proxy daemon TLS IP socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-tls.socket; enabled; preset: disabled)
     Active: active (listening) since Mon 2026-02-02 17:33:19 UTC; 40min ago
      Until: Mon 2026-02-02 17:33:19 UTC; 40min ago
   Triggers: ● virtproxyd.service
     Listen: [::]:16514 (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 8.0K (peak: 256.0K)
        CPU: 1ms
     CGroup: /system.slice/virtproxyd-tls.socket

Feb 02 17:33:19 compute-0 systemd[1]: Listening on libvirt proxy daemon TLS IP socket.

● virtproxyd.socket - libvirt proxy daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd.socket.d
             └─override.conf
     Active: active (listening) since Mon 2026-02-02 17:33:19 UTC; 40min ago
      Until: Mon 2026-02-02 17:33:19 UTC; 40min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock (Stream)
     CGroup: /system.slice/virtproxyd.socket

Feb 02 17:33:19 compute-0 systemd[1]: Listening on libvirt proxy daemon socket.

● virtqemud-admin.socket - libvirt QEMU daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-admin.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 17:34:24 UTC; 38min ago
      Until: Mon 2026-02-02 17:34:24 UTC; 38min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 488.0K)
        CPU: 2ms
     CGroup: /system.slice/virtqemud-admin.socket

Feb 02 17:34:24 compute-0 systemd[1]: Starting libvirt QEMU daemon admin socket...
Feb 02 17:34:24 compute-0 systemd[1]: Listening on libvirt QEMU daemon admin socket.

● virtqemud-ro.socket - libvirt QEMU daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-ro.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 17:34:24 UTC; 38min ago
      Until: Mon 2026-02-02 17:34:24 UTC; 38min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 648.0K)
        CPU: 3ms
     CGroup: /system.slice/virtqemud-ro.socket

Feb 02 17:34:24 compute-0 systemd[1]: Starting libvirt QEMU daemon read-only socket...
Feb 02 17:34:24 compute-0 systemd[1]: Listening on libvirt QEMU daemon read-only socket.

● virtqemud.socket - libvirt QEMU daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 17:34:24 UTC; 38min ago
      Until: Mon 2026-02-02 17:34:24 UTC; 38min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtqemud.socket

Feb 02 17:34:24 compute-0 systemd[1]: Starting libvirt QEMU daemon socket...
Feb 02 17:34:24 compute-0 systemd[1]: Listening on libvirt QEMU daemon socket.

● virtsecretd-admin.socket - libvirt secret daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-admin.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 17:34:25 UTC; 38min ago
      Until: Mon 2026-02-02 17:34:25 UTC; 38min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 528.0K)
        CPU: 2ms
     CGroup: /system.slice/virtsecretd-admin.socket

Feb 02 17:34:25 compute-0 systemd[1]: Starting libvirt secret daemon admin socket...
Feb 02 17:34:25 compute-0 systemd[1]: Listening on libvirt secret daemon admin socket.

● virtsecretd-ro.socket - libvirt secret daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-ro.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 17:34:25 UTC; 38min ago
      Until: Mon 2026-02-02 17:34:25 UTC; 38min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 1ms
     CGroup: /system.slice/virtsecretd-ro.socket

Feb 02 17:34:25 compute-0 systemd[1]: Starting libvirt secret daemon read-only socket...
Feb 02 17:34:25 compute-0 systemd[1]: Listening on libvirt secret daemon read-only socket.

● virtsecretd.socket - libvirt secret daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 17:34:25 UTC; 38min ago
      Until: Mon 2026-02-02 17:34:25 UTC; 38min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 560.0K)
        CPU: 2ms
     CGroup: /system.slice/virtsecretd.socket

Feb 02 17:34:25 compute-0 systemd[1]: Starting libvirt secret daemon socket...
Feb 02 17:34:25 compute-0 systemd[1]: Listening on libvirt secret daemon socket.

○ virtstoraged-admin.socket - libvirt storage daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-admin-sock (Stream)

○ virtstoraged-ro.socket - libvirt storage daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock-ro (Stream)

○ virtstoraged.socket - libvirt storage daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock (Stream)

● swap.swap - /swap
     Loaded: loaded (/etc/fstab; generated)
     Active: active since Mon 2026-02-02 17:15:49 UTC; 57min ago
      Until: Mon 2026-02-02 17:15:49 UTC; 57min ago
       What: /swap
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● basic.target - Basic System
     Loaded: loaded (/usr/lib/systemd/system/basic.target; static)
     Active: active since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
       Docs: man:systemd.special(7)

Feb 02 16:46:03 localhost systemd[1]: Reached target Basic System.

○ blockdev@dev-disk-by\x2duuid-822f14ea\x2d6e7e\x2d41df\x2db0d8\x2dfbe282d9ded8.target - Block Device Preparation for /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ blockdev@dev-vda1.target - Block Device Preparation for /dev/vda1
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● ceph-eb48d0ef-3496-563c-b73d-661fb962013e.target - Ceph cluster eb48d0ef-3496-563c-b73d-661fb962013e
     Loaded: loaded (/etc/systemd/system/ceph-eb48d0ef-3496-563c-b73d-661fb962013e.target; enabled; preset: disabled)
     Active: active since Mon 2026-02-02 17:20:16 UTC; 53min ago
      Until: Mon 2026-02-02 17:20:16 UTC; 53min ago

Feb 02 17:20:16 compute-0 systemd[1]: Reached target Ceph cluster eb48d0ef-3496-563c-b73d-661fb962013e.

● ceph.target - All Ceph clusters and services
     Loaded: loaded (/etc/systemd/system/ceph.target; enabled; preset: disabled)
     Active: active since Mon 2026-02-02 17:20:15 UTC; 53min ago
      Until: Mon 2026-02-02 17:20:15 UTC; 53min ago

Feb 02 17:20:15 compute-0 systemd[1]: Reached target All Ceph clusters and services.

● cloud-config.target - Cloud-config availability
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.target; static)
     Active: active since Mon 2026-02-02 16:46:07 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:07 UTC; 1h 27min ago

Feb 02 16:46:07 np0005605476.novalocal systemd[1]: Reached target Cloud-config availability.

● cloud-init.target - Cloud-init target
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.target; enabled-runtime; preset: disabled)
     Active: active since Mon 2026-02-02 16:46:08 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:08 UTC; 1h 27min ago

Feb 02 16:46:08 np0005605476.novalocal systemd[1]: Reached target Cloud-init target.

○ cryptsetup-pre.target - Local Encrypted Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● cryptsetup.target - Local Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup.target; static)
     Active: active since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
       Docs: man:systemd.special(7)

● edpm_libvirt.target
     Loaded: loaded (/etc/systemd/system/edpm_libvirt.target; static)
     Active: active since Mon 2026-02-02 17:34:55 UTC; 38min ago
      Until: Mon 2026-02-02 17:34:55 UTC; 38min ago

Feb 02 17:34:55 compute-0 systemd[1]: Reached target edpm_libvirt.target.

○ emergency.target - Emergency Mode
     Loaded: loaded (/usr/lib/systemd/system/emergency.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ first-boot-complete.target - First Boot Complete
     Loaded: loaded (/usr/lib/systemd/system/first-boot-complete.target; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
       Docs: man:systemd.special(7)

Feb 02 16:46:02 localhost systemd[1]: First Boot Complete was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ getty-pre.target - Preparation for Logins
     Loaded: loaded (/usr/lib/systemd/system/getty-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

● getty.target - Login Prompts
     Loaded: loaded (/usr/lib/systemd/system/getty.target; static)
     Active: active since Mon 2026-02-02 16:46:07 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:07 UTC; 1h 27min ago
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

Feb 02 16:46:07 np0005605476.novalocal systemd[1]: Reached target Login Prompts.

○ graphical.target - Graphical Interface
     Loaded: loaded (/usr/lib/systemd/system/graphical.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ initrd-fs.target - Initrd File Systems
     Loaded: loaded (/usr/lib/systemd/system/initrd-fs.target; static)
     Active: inactive (dead) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
       Docs: man:systemd.special(7)

Feb 02 16:46:01 localhost systemd[1]: Reached target Initrd File Systems.

○ initrd-root-device.target - Initrd Root Device
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-device.target; static)
     Active: inactive (dead) since Mon 2026-02-02 16:46:01 UTC; 1h 27min ago
       Docs: man:systemd.special(7)

Feb 02 16:46:00 localhost systemd[1]: Reached target Initrd Root Device.
Feb 02 16:46:01 localhost systemd[1]: Stopped target Initrd Root Device.

○ initrd-root-fs.target - Initrd Root File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-fs.target; static)
     Active: inactive (dead) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
       Docs: man:systemd.special(7)

Feb 02 16:46:01 localhost systemd[1]: Reached target Initrd Root File System.

○ initrd-switch-root.target - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.target; static)
     Active: inactive (dead) since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago

Feb 02 16:46:02 localhost systemd[1]: Reached target Switch Root.

○ initrd-usr-fs.target - Initrd /usr File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-usr-fs.target; static)
     Active: inactive (dead) since Mon 2026-02-02 16:46:01 UTC; 1h 27min ago
       Docs: man:systemd.special(7)

Feb 02 16:46:01 localhost systemd[1]: Stopped target Initrd /usr File System.
Notice: journal has been rotated since unit was started, output may be incomplete.

○ initrd.target - Initrd Default Target
     Loaded: loaded (/usr/lib/systemd/system/initrd.target; static)
     Active: inactive (dead) since Mon 2026-02-02 16:46:01 UTC; 1h 27min ago
       Docs: man:systemd.special(7)

Feb 02 16:46:01 localhost systemd[1]: Reached target Initrd Default Target.
Feb 02 16:46:01 localhost systemd[1]: Stopped target Initrd Default Target.

● integritysetup.target - Local Integrity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/integritysetup.target; static)
     Active: active since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
       Docs: man:systemd.special(7)

● local-fs-pre.target - Preparation for Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs-pre.target; static)
     Active: active since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
       Docs: man:systemd.special(7)

Feb 02 16:46:03 localhost systemd[1]: Reached target Preparation for Local File Systems.

● local-fs.target - Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs.target; static)
     Active: active since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
       Docs: man:systemd.special(7)

Feb 02 16:46:03 localhost systemd[1]: Reached target Local File Systems.

● multi-user.target - Multi-User System
     Loaded: loaded (/usr/lib/systemd/system/multi-user.target; indirect; preset: disabled)
     Active: active since Mon 2026-02-02 16:46:07 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:07 UTC; 1h 27min ago
       Docs: man:systemd.special(7)

Feb 02 16:46:07 np0005605476.novalocal systemd[1]: Reached target Multi-User System.

● network-online.target - Network is Online
     Loaded: loaded (/usr/lib/systemd/system/network-online.target; static)
     Active: active since Mon 2026-02-02 16:46:07 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:07 UTC; 1h 27min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Feb 02 16:46:07 np0005605476.novalocal systemd[1]: Reached target Network is Online.

● network-pre.target - Preparation for Network
     Loaded: loaded (/usr/lib/systemd/system/network-pre.target; static)
     Active: active since Mon 2026-02-02 16:46:05 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:05 UTC; 1h 27min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Feb 02 16:46:05 np0005605476.novalocal systemd[1]: Reached target Preparation for Network.

● network.target - Network
     Loaded: loaded (/usr/lib/systemd/system/network.target; static)
     Active: active since Mon 2026-02-02 16:46:05 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:05 UTC; 1h 27min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Feb 02 16:46:05 np0005605476.novalocal systemd[1]: Reached target Network.

● nfs-client.target - NFS client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-client.target; enabled; preset: disabled)
     Active: active since Mon 2026-02-02 16:46:05 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:05 UTC; 1h 27min ago

Feb 02 16:46:05 np0005605476.novalocal systemd[1]: Reached target NFS client services.

○ nss-lookup.target - Host and Network Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-lookup.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● nss-user-lookup.target - User and Group Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-user-lookup.target; static)
     Active: active since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
       Docs: man:systemd.special(7)

Feb 02 16:46:03 localhost systemd[1]: Reached target User and Group Name Lookups.

● paths.target - Path Units
     Loaded: loaded (/usr/lib/systemd/system/paths.target; static)
     Active: active since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
       Docs: man:systemd.special(7)

○ remote-cryptsetup.target - Remote Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-cryptsetup.target; disabled; preset: enabled)
     Active: inactive (dead) since Mon 2026-02-02 16:46:01 UTC; 1h 27min ago
       Docs: man:systemd.special(7)

Feb 02 16:46:01 localhost systemd[1]: Reached target Remote Encrypted Volumes.
Feb 02 16:46:01 localhost systemd[1]: Stopped target Remote Encrypted Volumes.

● remote-fs-pre.target - Preparation for Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs-pre.target; static)
     Active: active since Mon 2026-02-02 16:46:05 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:05 UTC; 1h 27min ago
       Docs: man:systemd.special(7)

Feb 02 16:46:05 np0005605476.novalocal systemd[1]: Reached target Preparation for Remote File Systems.

● remote-fs.target - Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs.target; enabled; preset: enabled)
     Active: active since Mon 2026-02-02 16:46:05 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:05 UTC; 1h 27min ago
       Docs: man:systemd.special(7)

Feb 02 16:46:05 np0005605476.novalocal systemd[1]: Reached target Remote File Systems.

○ remote-veritysetup.target - Remote Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-veritysetup.target; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ rescue.target - Rescue Mode
     Loaded: loaded (/usr/lib/systemd/system/rescue.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● rpc_pipefs.target
     Loaded: loaded (/usr/lib/systemd/system/rpc_pipefs.target; static)
     Active: active since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago

● rpcbind.target - RPC Port Mapper
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.target; static)
     Active: active since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
       Docs: man:systemd.special(7)

○ shutdown.target - System Shutdown
     Loaded: loaded (/usr/lib/systemd/system/shutdown.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● slices.target - Slice Units
     Loaded: loaded (/usr/lib/systemd/system/slices.target; static)
     Active: active since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
       Docs: man:systemd.special(7)

● sockets.target - Socket Units
     Loaded: loaded (/usr/lib/systemd/system/sockets.target; static)
Unit syslog.target could not be found.
     Active: active since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
       Docs: man:systemd.special(7)

Feb 02 16:46:03 localhost systemd[1]: Reached target Socket Units.

● sshd-keygen.target
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen.target; static)
     Active: active since Mon 2026-02-02 17:33:04 UTC; 40min ago
      Until: Mon 2026-02-02 17:33:04 UTC; 40min ago

Feb 02 17:33:04 compute-0 systemd[1]: Reached target sshd-keygen.target.

● swap.target - Swaps
     Loaded: loaded (/usr/lib/systemd/system/swap.target; static)
     Active: active since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
       Docs: man:systemd.special(7)

● sysinit.target - System Initialization
     Loaded: loaded (/usr/lib/systemd/system/sysinit.target; static)
     Active: active since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
       Docs: man:systemd.special(7)

Feb 02 16:46:03 localhost systemd[1]: Reached target System Initialization.

● time-set.target - System Time Set
     Loaded: loaded (/usr/lib/systemd/system/time-set.target; static)
     Active: active since Mon 2026-02-02 17:20:16 UTC; 53min ago
      Until: Mon 2026-02-02 17:20:16 UTC; 53min ago
       Docs: man:systemd.special(7)

Feb 02 17:20:16 compute-0 systemd[1]: Reached target System Time Set.

● time-sync.target - System Time Synchronized
     Loaded: loaded (/usr/lib/systemd/system/time-sync.target; static)
     Active: active since Mon 2026-02-02 17:20:16 UTC; 53min ago
      Until: Mon 2026-02-02 17:20:16 UTC; 53min ago
       Docs: man:systemd.special(7)

Feb 02 17:20:16 compute-0 systemd[1]: Reached target System Time Synchronized.

● timers.target - Timer Units
     Loaded: loaded (/usr/lib/systemd/system/timers.target; static)
     Active: active since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
       Docs: man:systemd.special(7)

Feb 02 16:46:03 localhost systemd[1]: Reached target Timer Units.

○ umount.target - Unmount All Filesystems
     Loaded: loaded (/usr/lib/systemd/system/umount.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ veritysetup-pre.target - Local Verity Protected Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/veritysetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● veritysetup.target - Local Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/veritysetup.target; static)
     Active: active since Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:02 UTC; 1h 27min ago
       Docs: man:systemd.special(7)

○ virt-guest-shutdown.target - Libvirt guests shutdown
     Loaded: loaded (/etc/systemd/system/virt-guest-shutdown.target; static)
     Active: inactive (dead)
       Docs: https://libvirt.org

● 70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783-2c712ae2bc69d601.timer - /usr/bin/podman healthcheck run 70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783
     Loaded: loaded (/run/systemd/transient/70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783-2c712ae2bc69d601.timer; transient)
  Transient: yes
     Active: active (waiting) since Mon 2026-02-02 17:29:46 UTC; 43min ago
      Until: Mon 2026-02-02 17:29:46 UTC; 43min ago
    Trigger: Mon 2026-02-02 18:13:52 UTC; 28s left
   Triggers: ● 70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783-2c712ae2bc69d601.service

Feb 02 17:29:46 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 70e0d83cb45fbe649a29519f5074ad11df900a5702b7e7e666708ce90ca8d783.

● 983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b-17f04868bd9a498.timer - /usr/bin/podman healthcheck run 983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b
     Loaded: loaded (/run/systemd/transient/983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b-17f04868bd9a498.timer; transient)
  Transient: yes
     Active: active (waiting) since Mon 2026-02-02 17:30:44 UTC; 42min ago
      Until: Mon 2026-02-02 17:30:44 UTC; 42min ago
    Trigger: Mon 2026-02-02 18:13:50 UTC; 26s left
   Triggers: ● 983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b-17f04868bd9a498.service

Feb 02 17:30:44 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 983aad36fbefc6eb42f7b2455e6339d70a90e29e2ce721c2d9ecdd2cd91b9e7b.

● dnf-makecache.timer - dnf makecache --timer
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.timer; enabled; preset: enabled)
     Active: active (waiting) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
    Trigger: Mon 2026-02-02 18:28:15 UTC; 14min left
   Triggers: ● dnf-makecache.service

Feb 02 16:46:03 localhost systemd[1]: Started dnf makecache --timer.

● logrotate.timer - Daily rotation of log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.timer; enabled; preset: enabled)
     Active: active (waiting) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
    Trigger: Tue 2026-02-03 00:00:00 UTC; 5h 46min left
   Triggers: ● logrotate.service
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

Feb 02 16:46:03 localhost systemd[1]: Started Daily rotation of log files.

○ raid-check.timer - Weekly RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.timer; enabled; preset: enabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● raid-check.service

○ sysstat-collect.timer - Run system activity accounting tool every 10 minutes
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-collect.service

○ sysstat-summary.timer - Generate summary of yesterday's process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-summary.service

● systemd-tmpfiles-clean.timer - Daily Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.timer; static)
     Active: active (waiting) since Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
      Until: Mon 2026-02-02 16:46:03 UTC; 1h 27min ago
    Trigger: Tue 2026-02-03 17:01:08 UTC; 22h left
   Triggers: ● systemd-tmpfiles-clean.service
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)

Feb 02 16:46:03 localhost systemd[1]: Started Daily Cleanup of Temporary Directories.

● unbound-anchor.timer - daily update of the root trust anchor for DNSSEC
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.timer; enabled; preset: enabled)
     Active: active (waiting) since Mon 2026-02-02 17:16:18 UTC; 57min ago
      Until: Mon 2026-02-02 17:16:18 UTC; 57min ago
    Trigger: Tue 2026-02-03 00:00:00 UTC; 5h 46min left
   Triggers: ● unbound-anchor.service
       Docs: man:unbound-anchor(8)

Feb 02 17:16:18 compute-0 systemd[1]: Started daily update of the root trust anchor for DNSSEC.
