● compute-0
    State: running
    Units: 451 loaded (incl. loaded aliases)
     Jobs: 0 queued
   Failed: 0 units
    Since: Sat 2026-01-31 06:09:22 UTC; 3h 11min ago
  systemd: 252-64.el9
   CGroup: /
           ├─442196 turbostat --debug sleep 10
           ├─442200 sleep 10
           ├─init.scope
           │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
           ├─machine.slice
           │ ├─libpod-08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f.scope
           │ │ └─container
           │ │   ├─247706 dumb-init --single-child -- kolla_start
           │ │   ├─247708 /usr/bin/python3 /usr/bin/nova-compute
           │ │   ├─254428 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmpf8bmbhs4/privsep.sock
           │ │   ├─255323 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmpm6uxsdmh/privsep.sock
           │ │   └─315733 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmpel5it6fi/privsep.sock
           │ ├─libpod-1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238.scope
           │ │ └─container
           │ │   ├─149459 dumb-init --single-child -- kolla_start
           │ │   └─149462 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
           │ └─libpod-4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1.scope
           │   └─container
           │     ├─160023 dumb-init --single-child -- kolla_start
           │     ├─160028 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │     ├─160292 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │     ├─160297 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmprrz9sb_2/privsep.sock
           │     ├─254935 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmptkbyd576/privsep.sock
           │     └─255113 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmp6f9jilk8/privsep.sock
           ├─system.slice
           │ ├─NetworkManager.service
           │ │ └─49108 /usr/sbin/NetworkManager --no-daemon
           │ ├─auditd.service
           │ │ ├─699 /sbin/auditd
           │ │ └─701 /usr/sbin/sedispatch
           │ ├─chronyd.service
           │ │ └─58682 /usr/sbin/chronyd -F 2
           │ ├─crond.service
           │ │ └─1009 /usr/sbin/crond -n
           │ ├─dbus-broker.service
           │ │ ├─809 /usr/bin/dbus-broker-launch --scope system --audit
           │ │ └─810 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
           │ ├─edpm_nova_compute.service
           │ │ └─247704 /usr/bin/conmon --api-version 1 -c 08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f -u 08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f/userdata -p /run/containers/storage/overlay-containers/08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f
           │ ├─edpm_ovn_controller.service
           │ │ └─149457 /usr/bin/conmon --api-version 1 -c 1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238 -u 1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238/userdata -p /run/containers/storage/overlay-containers/1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238
           │ ├─edpm_ovn_metadata_agent.service
           │ │ └─160021 /usr/bin/conmon --api-version 1 -c 4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1 -u 4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1/userdata -p /run/containers/storage/overlay-containers/4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1
           │ ├─gssproxy.service
           │ │ └─875 /usr/sbin/gssproxy -D
           │ ├─irqbalance.service
           │ │ └─815 /usr/sbin/irqbalance
           │ ├─iscsid.service
           │ │ └─231340 /usr/sbin/iscsid -f
           │ ├─multipathd.service
           │ │ └─231500 /sbin/multipathd -d -s
           │ ├─ovs-vswitchd.service
           │ │ └─47404 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
           │ ├─ovsdb-server.service
           │ │ └─47323 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
           │ ├─polkit.service
           │ │ └─43587 /usr/lib/polkit-1/polkitd --no-debug
           │ ├─rpcbind.service
           │ │ └─697 /usr/bin/rpcbind -w -f
           │ ├─rsyslog.service
           │ │ └─1005 /usr/sbin/rsyslogd -n
           │ ├─sshd.service
           │ │ └─187503 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
           │ ├─system-ceph\x2df70fcd2a\x2ddcb4\x2d5f89\x2da4ba\x2d79a09959083b.slice
           │ │ ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@crash.compute-0.service
           │ │ │ ├─libpod-payload-1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976
           │ │ │ │ ├─81648 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
           │ │ │ │ └─81650 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
           │ │ │ └─runtime
           │ │ │   └─81645 /usr/bin/conmon --api-version 1 -c 1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976 -u 1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976/userdata -p /run/containers/storage/overlay-containers/1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976
           │ │ ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@haproxy.rgw.default.compute-0.cwtxbj.service
           │ │ │ ├─libpod-payload-b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3
           │ │ │ │ ├─95977 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
           │ │ │ │ ├─95985 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
           │ │ │ │ └─95987 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
           │ │ │ └─runtime
           │ │ │   └─95975 /usr/bin/conmon --api-version 1 -c b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3 -u b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3/userdata -p /run/containers/storage/overlay-containers/b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-haproxy-rgw-default-compute-0-cwtxbj --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@haproxy.rgw.default.compute-0.cwtxbj.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3
           │ │ ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@keepalived.rgw.default.compute-0.rwjfwq.service
           │ │ │ ├─libpod-payload-37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51
           │ │ │ │ ├─96434 /run/podman-init -- ./init.sh
           │ │ │ │ ├─96436 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
           │ │ │ │ └─96438 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
           │ │ │ └─runtime
           │ │ │   └─96432 /usr/bin/conmon --api-version 1 -c 37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51 -u 37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51/userdata -p /run/containers/storage/overlay-containers/37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-keepalived-rgw-default-compute-0-rwjfwq --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@keepalived.rgw.default.compute-0.rwjfwq.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51
           │ │ ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mds.cephfs.compute-0.voybui.service
           │ │ │ ├─libpod-payload-b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd
           │ │ │ │ ├─94764 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.voybui -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─94769 /usr/bin/ceph-mds -n mds.cephfs.compute-0.voybui -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─94760 /usr/bin/conmon --api-version 1 -c b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd -u b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd/userdata -p /run/containers/storage/overlay-containers/b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-mds-cephfs-compute-0-voybui --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mds.cephfs.compute-0.voybui.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd
           │ │ ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mgr.compute-0.hhuoua.service
           │ │ │ ├─libpod-payload-80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26
           │ │ │ │ ├─74789 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.hhuoua -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─74791 /usr/bin/ceph-mgr -n mgr.compute-0.hhuoua -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─74787 /usr/bin/conmon --api-version 1 -c 80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26 -u 80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26/userdata -p /run/containers/storage/overlay-containers/80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-mgr-compute-0-hhuoua --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mgr.compute-0.hhuoua.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26
           │ │ ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mon.compute-0.service
           │ │ │ ├─libpod-payload-c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7
           │ │ │ │ ├─74494 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ │ └─74496 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─74492 /usr/bin/conmon --api-version 1 -c c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7 -u c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7/userdata -p /run/containers/storage/overlay-containers/c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7
           │ │ ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@osd.0.service
           │ │ │ ├─libpod-payload-7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630
           │ │ │ │ ├─84814 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─84816 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─84812 /usr/bin/conmon --api-version 1 -c 7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630 -u 7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630/userdata -p /run/containers/storage/overlay-containers/7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630
           │ │ └─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@rgw.rgw.compute-0.njduba.service
           │ │   ├─libpod-payload-7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595
           │ │   │ ├─94237 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.njduba -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │   │ └─94239 /usr/bin/radosgw -n client.rgw.rgw.compute-0.njduba -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │   └─runtime
           │ │     └─94235 /usr/bin/conmon --api-version 1 -c 7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595 -u 7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595/userdata -p /run/containers/storage/overlay-containers/7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-rgw-rgw-compute-0-njduba --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@rgw.rgw.compute-0.njduba.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595
           │ ├─system-getty.slice
           │ │ └─getty@tty1.service
           │ │   └─1010 /sbin/agetty -o "-p -- \\u" --noclear - linux
           │ ├─system-serial\x2dgetty.slice
           │ │ └─serial-getty@ttyS0.service
           │ │   └─1011 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
           │ ├─systemd-hostnamed.service
           │ │ └─438269 /usr/lib/systemd/systemd-hostnamed
           │ ├─systemd-journald.service
           │ │ └─675 /usr/lib/systemd/systemd-journald
           │ ├─systemd-logind.service
           │ │ └─816 /usr/lib/systemd/systemd-logind
           │ ├─systemd-machined.service
           │ │ └─214448 /usr/lib/systemd/systemd-machined
           │ ├─systemd-udevd.service
           │ │ └─udev
           │ │   └─727 /usr/lib/systemd/systemd-udevd
           │ ├─tuned.service
           │ │ └─108365 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
           │ ├─virtlogd.service
           │ │ └─213815 /usr/sbin/virtlogd
           │ ├─virtnodedevd.service
           │ │ └─248127 /usr/sbin/virtnodedevd --timeout 120
           │ ├─virtqemud.service
           │ │ └─247621 /usr/sbin/virtqemud --timeout 120
           │ └─virtsecretd.service
           │   └─253515 /usr/sbin/virtsecretd --timeout 120
           └─user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4520 /usr/bin/python3
             │ ├─session-67.scope
             │ │ ├─435980 "sshd-session: zuul [priv]"
             │ │ ├─435983 "sshd-session: zuul@notty"
             │ │ ├─435984 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
Unit boot.automount could not be found.
             │ │ ├─436008 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─442195 timeout 15s turbostat --debug sleep 10
             │ │ ├─442899 timeout 300s ceph fs dump --format json-pretty
             │ │ ├─442900 /usr/bin/python3 -s /usr/bin/ceph fs dump --format json-pretty
             │ │ ├─442903 timeout 300s systemctl status --all
             │ │ └─442904 systemctl status --all
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─12265 /usr/bin/dbus-broker-launch --scope user
             │   │   └─12293 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4308 /usr/lib/systemd/systemd --user
             │   │ └─4310 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-03c69239.scope
             │       └─12088 catatonit -P
             └─user-42477.slice
               ├─session-21.scope
               │ ├─76126 "sshd-session: ceph-admin [priv]"
               │ └─76148 "sshd-session: ceph-admin"
               ├─session-23.scope
               │ ├─76143 "sshd-session: ceph-admin [priv]"
               │ └─76149 "sshd-session: ceph-admin@notty"
               ├─session-24.scope
               │ ├─76200 "sshd-session: ceph-admin [priv]"
               │ └─76203 "sshd-session: ceph-admin@notty"
               ├─session-25.scope
               │ ├─76254 "sshd-session: ceph-admin [priv]"
               │ └─76257 "sshd-session: ceph-admin@notty"
               ├─session-26.scope
               │ ├─76308 "sshd-session: ceph-admin [priv]"
               │ └─76311 "sshd-session: ceph-admin@notty"
               ├─session-27.scope
               │ ├─76362 "sshd-session: ceph-admin [priv]"
               │ └─76365 "sshd-session: ceph-admin@notty"
               ├─session-28.scope
               │ ├─76416 "sshd-session: ceph-admin [priv]"
               │ └─76419 "sshd-session: ceph-admin@notty"
               ├─session-29.scope
               │ ├─76470 "sshd-session: ceph-admin [priv]"
               │ └─76473 "sshd-session: ceph-admin@notty"
               ├─session-30.scope
               │ ├─76524 "sshd-session: ceph-admin [priv]"
               │ └─76527 "sshd-session: ceph-admin@notty"
               ├─session-31.scope
               │ ├─76578 "sshd-session: ceph-admin [priv]"
               │ └─76581 "sshd-session: ceph-admin@notty"
               ├─session-32.scope
               │ ├─76605 "sshd-session: ceph-admin [priv]"
               │ └─76608 "sshd-session: ceph-admin@notty"
               ├─session-33.scope
               │ ├─76659 "sshd-session: ceph-admin [priv]"
               │ └─76662 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─76130 /usr/lib/systemd/systemd --user
                   └─76132 "(sd-pam)"

● proc-sys-fs-binfmt_misc.automount - Arbitrary Executable File Formats File System Automount Point
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.automount; static)
     Active: active (running) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
   Triggers: ● proc-sys-fs-binfmt_misc.mount
      Where: /proc/sys/fs/binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Jan 31 07:05:33 compute-0 systemd[1]: proc-sys-fs-binfmt_misc.automount: Got automount request for /proc/sys/fs/binfmt_misc, triggered by 77573 (sysctl)

● dev-cdrom.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ceph_vg0-ceph_lv0.device - /dev/ceph_vg0/ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:03:58 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:03:58 UTC; 2h 17min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2ddiskseq-1.device - /dev/disk/by-diskseq/1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2ddiskseq-3.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2ddiskseq-5.device - /dev/disk/by-diskseq/5
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:03:57 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:03:57 UTC; 2h 17min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2ddiskseq-6.device - /dev/disk/by-diskseq/6
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:03:58 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:03:58 UTC; 2h 17min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-ata\x2dQEMU_DVD\x2dROM_QM00001.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg0\x2dceph_lv0.device - /dev/disk/by-id/dm-name-ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:03:58 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:03:58 UTC; 2h 17min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2dDGzFdrhzTpWLLvoz8Nc00NnYADdFCWf4Yax8EchteUk4Ux8Rk31oChXAANr8dowq.device - /dev/disk/by-id/dm-uuid-LVM-DGzFdrhzTpWLLvoz8Nc00NnYADdFCWf4Yax8EchteUk4Ux8Rk31oChXAANr8dowq
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:03:58 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:03:58 UTC; 2h 17min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dryzuLz\x2db0Rp\x2dfMqt\x2dojG7\x2dv58t\x2d1NPY\x2dhLGqSU.device - /dev/disk/by-id/lvm-pv-uuid-ryzuLz-b0Rp-fMqt-ojG7-v58t-1NPY-hLGqSU
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:03:58 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:03:58 UTC; 2h 17min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2dlabel-config\x2d2.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpartuuid-714daac1\x2d01.device - /dev/disk/by-partuuid/714daac1-01
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0.device - /dev/disk/by-path/pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0.device - /dev/disk/by-path/virtio-pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/virtio-pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2duuid-2026\x2d01\x2d31\x2d06\x2d09\x2d00\x2d00.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2duuid-822f14ea\x2d6e7e\x2d41df\x2db0d8\x2dfbe282d9ded8.device - /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

Jan 31 06:09:19 localhost systemd[1]: Found device /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8.

● dev-dm\x2d0.device - /dev/dm-0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:03:58 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:03:58 UTC; 2h 17min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-loop3.device - /dev/loop3
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:03:57 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:03:57 UTC; 2h 17min ago
     Device: /sys/devices/virtual/block/loop3

● dev-mapper-ceph_vg0\x2dceph_lv0.device - /dev/mapper/ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:03:58 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:03:58 UTC; 2h 17min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-rfkill.device - /dev/rfkill
    Follows: unit currently follows state of sys-devices-virtual-misc-rfkill.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:27 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:27 UTC; 3h 11min ago
     Device: /sys/devices/virtual/misc/rfkill

● dev-sr0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ttyS0.device - /dev/ttyS0
    Follows: unit currently follows state of sys-devices-pnp0-00:00-tty-ttyS0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

Jan 31 06:09:24 localhost systemd[1]: Condition check resulted in /dev/ttyS0 being skipped.

● dev-ttyS1.device - /dev/ttyS1
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● dev-ttyS2.device - /dev/ttyS2
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS2.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● dev-ttyS3.device - /dev/ttyS3
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS3.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● dev-vda.device - /dev/vda
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-vda1.device - /dev/vda1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device - QEMU_DVD-ROM config-2
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● sys-devices-pci0000:00-0000:00:03.0-virtio1-net-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● sys-devices-pci0000:00-0000:00:07.0-virtio5-net-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:11:56 UTC; 3h 9min ago
      Until: Sat 2026-01-31 06:11:56 UTC; 3h 9min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-devices-platform-serial8250-tty-ttyS1.device - /sys/devices/platform/serial8250/tty/ttyS1
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● sys-devices-platform-serial8250-tty-ttyS2.device - /sys/devices/platform/serial8250/tty/ttyS2
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● sys-devices-platform-serial8250-tty-ttyS3.device - /sys/devices/platform/serial8250/tty/ttyS3
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● sys-devices-pnp0-00:00-tty-ttyS0.device - /sys/devices/pnp0/00:00/tty/ttyS0
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

● sys-devices-virtual-block-dm\x2d0.device - /sys/devices/virtual/block/dm-0
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:03:58 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:03:58 UTC; 2h 17min ago
     Device: /sys/devices/virtual/block/dm-0

● sys-devices-virtual-block-loop3.device - /sys/devices/virtual/block/loop3
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:03:57 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:03:57 UTC; 2h 17min ago
     Device: /sys/devices/virtual/block/loop3

● sys-devices-virtual-misc-rfkill.device - /sys/devices/virtual/misc/rfkill
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:27 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:27 UTC; 3h 11min ago
     Device: /sys/devices/virtual/misc/rfkill

● sys-devices-virtual-net-br\x2dex.device - /sys/devices/virtual/net/br-ex
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:00:58 UTC; 2h 20min ago
      Until: Sat 2026-01-31 07:00:58 UTC; 2h 20min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-devices-virtual-net-br\x2dint.device - /sys/devices/virtual/net/br-int
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:16:32 UTC; 2h 4min ago
      Until: Sat 2026-01-31 07:16:32 UTC; 2h 4min ago
     Device: /sys/devices/virtual/net/br-int

● sys-devices-virtual-net-genev_sys_6081.device - /sys/devices/virtual/net/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:16:32 UTC; 2h 4min ago
      Until: Sat 2026-01-31 07:16:32 UTC; 2h 4min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-devices-virtual-net-ovs\x2dsystem.device - /sys/devices/virtual/net/ovs-system
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:00:58 UTC; 2h 20min ago
      Until: Sat 2026-01-31 07:00:58 UTC; 2h 20min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-devices-virtual-net-vlan20.device - /sys/devices/virtual/net/vlan20
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:00:58 UTC; 2h 20min ago
      Until: Sat 2026-01-31 07:00:58 UTC; 2h 20min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-devices-virtual-net-vlan21.device - /sys/devices/virtual/net/vlan21
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:00:58 UTC; 2h 20min ago
      Until: Sat 2026-01-31 07:00:58 UTC; 2h 20min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-devices-virtual-net-vlan22.device - /sys/devices/virtual/net/vlan22
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:00:58 UTC; 2h 20min ago
      Until: Sat 2026-01-31 07:00:58 UTC; 2h 20min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-devices-virtual-net-vlan23.device - /sys/devices/virtual/net/vlan23
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:00:58 UTC; 2h 20min ago
      Until: Sat 2026-01-31 07:00:58 UTC; 2h 20min ago
     Device: /sys/devices/virtual/net/vlan23

● sys-module-configfs.device - /sys/module/configfs
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/module/configfs

● sys-module-fuse.device - /sys/module/fuse
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/module/fuse

● sys-subsystem-net-devices-br\x2dex.device - /sys/subsystem/net/devices/br-ex
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:00:58 UTC; 2h 20min ago
      Until: Sat 2026-01-31 07:00:58 UTC; 2h 20min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-subsystem-net-devices-br\x2dint.device - /sys/subsystem/net/devices/br-int
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:16:32 UTC; 2h 4min ago
      Until: Sat 2026-01-31 07:16:32 UTC; 2h 4min ago
     Device: /sys/devices/virtual/net/br-int

● sys-subsystem-net-devices-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-subsystem-net-devices-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:11:56 UTC; 3h 9min ago
      Until: Sat 2026-01-31 06:11:56 UTC; 3h 9min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-subsystem-net-devices-genev_sys_6081.device - /sys/subsystem/net/devices/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:16:32 UTC; 2h 4min ago
      Until: Sat 2026-01-31 07:16:32 UTC; 2h 4min ago
Unit boot.mount could not be found.
Unit home.mount could not be found.
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-subsystem-net-devices-ovs\x2dsystem.device - /sys/subsystem/net/devices/ovs-system
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:00:58 UTC; 2h 20min ago
      Until: Sat 2026-01-31 07:00:58 UTC; 2h 20min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-subsystem-net-devices-vlan20.device - /sys/subsystem/net/devices/vlan20
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:00:58 UTC; 2h 20min ago
      Until: Sat 2026-01-31 07:00:58 UTC; 2h 20min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-subsystem-net-devices-vlan21.device - /sys/subsystem/net/devices/vlan21
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:00:58 UTC; 2h 20min ago
      Until: Sat 2026-01-31 07:00:58 UTC; 2h 20min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-subsystem-net-devices-vlan22.device - /sys/subsystem/net/devices/vlan22
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:00:58 UTC; 2h 20min ago
      Until: Sat 2026-01-31 07:00:58 UTC; 2h 20min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-subsystem-net-devices-vlan23.device - /sys/subsystem/net/devices/vlan23
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:00:58 UTC; 2h 20min ago
      Until: Sat 2026-01-31 07:00:58 UTC; 2h 20min ago
     Device: /sys/devices/virtual/net/vlan23

● -.mount - Root Mount
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted)
      Where: /
       What: /dev/vda1
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages.mount - Huge Pages File System
     Loaded: loaded (/usr/lib/systemd/system/dev-hugepages.mount; static)
     Active: active (mounted) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Where: /dev/hugepages
       What: hugetlbfs
       Docs: https://docs.kernel.org/admin-guide/mm/hugetlbpage.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 52.0K (peak: 556.0K)
        CPU: 6ms
     CGroup: /dev-hugepages.mount

● dev-hugepages1G.mount - /dev/hugepages1G
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Sat 2026-01-31 07:03:02 UTC; 2h 18min ago
      Until: Sat 2026-01-31 07:03:02 UTC; 2h 18min ago
      Where: /dev/hugepages1G
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages2M.mount - /dev/hugepages2M
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Sat 2026-01-31 07:03:03 UTC; 2h 18min ago
      Until: Sat 2026-01-31 07:03:03 UTC; 2h 18min ago
      Where: /dev/hugepages2M
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-mqueue.mount - POSIX Message Queue File System
     Loaded: loaded (/usr/lib/systemd/system/dev-mqueue.mount; static)
     Active: active (mounted) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Where: /dev/mqueue
       What: mqueue
       Docs: man:mq_overview(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 3ms
     CGroup: /dev-mqueue.mount

○ proc-fs-nfsd.mount - NFSD configuration filesystem
     Loaded: loaded (/usr/lib/systemd/system/proc-fs-nfsd.mount; static)
     Active: inactive (dead)
      Where: /proc/fs/nfsd
       What: nfsd

● proc-sys-fs-binfmt_misc.mount - Arbitrary Executable File Formats File System
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.mount; disabled; preset: disabled)
     Active: active (mounted) since Sat 2026-01-31 07:05:33 UTC; 2h 15min ago
      Until: Sat 2026-01-31 07:05:33 UTC; 2h 15min ago
TriggeredBy: ● proc-sys-fs-binfmt_misc.automount
      Where: /proc/sys/fs/binfmt_misc
       What: binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 8.0K (peak: 568.0K)
        CPU: 5ms
     CGroup: /proc-sys-fs-binfmt_misc.mount

Jan 31 07:05:33 compute-0 systemd[1]: Mounting Arbitrary Executable File Formats File System...
Jan 31 07:05:33 compute-0 systemd[1]: Mounted Arbitrary Executable File Formats File System.

● run-credentials-systemd\x2dsysctl.service.mount - /run/credentials/systemd-sysctl.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 06:59:47 UTC; 2h 21min ago
      Until: Sat 2026-01-31 06:59:47 UTC; 2h 21min ago
      Where: /run/credentials/systemd-sysctl.service
       What: none

● run-credentials-systemd\x2dsysusers.service.mount - /run/credentials/systemd-sysusers.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Where: /run/credentials/systemd-sysusers.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup.service.mount - /run/credentials/systemd-tmpfiles-setup.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
      Where: /run/credentials/systemd-tmpfiles-setup.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup\x2ddev.service.mount - /run/credentials/systemd-tmpfiles-setup-dev.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Where: /run/credentials/systemd-tmpfiles-setup-dev.service
       What: none

● run-netns.mount - /run/netns
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:02:32 UTC; 2h 18min ago
      Until: Sat 2026-01-31 07:02:32 UTC; 2h 18min ago
      Where: /run/netns
       What: tmpfs

● run-user-1000.mount - /run/user/1000
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 06:09:53 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:53 UTC; 3h 11min ago
      Where: /run/user/1000
       What: tmpfs

● run-user-42477.mount - /run/user/42477
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:05:25 UTC; 2h 15min ago
      Until: Sat 2026-01-31 07:05:25 UTC; 2h 15min ago
      Where: /run/user/42477
       What: tmpfs

● sys-fs-fuse-connections.mount - FUSE Control File System
     Loaded: loaded (/usr/lib/systemd/system/sys-fs-fuse-connections.mount; static)
     Active: active (mounted) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Where: /sys/fs/fuse/connections
       What: fusectl
       Docs: https://docs.kernel.org/filesystems/fuse.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 516.0K)
        CPU: 4ms
     CGroup: /sys-fs-fuse-connections.mount

Jan 31 06:09:23 localhost systemd[1]: Mounting FUSE Control File System...
Jan 31 06:09:23 localhost systemd[1]: Mounted FUSE Control File System.

● sys-kernel-config.mount - Kernel Configuration File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Where: /sys/kernel/config
       What: configfs
       Docs: https://docs.kernel.org/filesystems/configfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● sys-kernel-debug-tracing.mount - /sys/kernel/debug/tracing
Unit sysroot.mount could not be found.
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 09:14:56 UTC; 6min ago
      Until: Sat 2026-01-31 09:14:56 UTC; 6min ago
      Where: /sys/kernel/debug/tracing
       What: tracefs

● sys-kernel-debug.mount - Kernel Debug File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-debug.mount; static)
     Active: active (mounted) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Where: /sys/kernel/debug
       What: debugfs
       Docs: https://docs.kernel.org/filesystems/debugfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 3ms
     CGroup: /sys-kernel-debug.mount

● sys-kernel-tracing.mount - Kernel Trace File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-tracing.mount; static)
     Active: active (mounted) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Where: /sys/kernel/tracing
       What: tracefs
       Docs: https://docs.kernel.org/trace/ftrace.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 2ms
     CGroup: /sys-kernel-tracing.mount

○ tmp.mount - Temporary Directory /tmp
     Loaded: loaded (/usr/lib/systemd/system/tmp.mount; disabled; preset: disabled)
     Active: inactive (dead)
      Where: /tmp
       What: tmpfs
       Docs: https://systemd.io/TEMPORARY_DIRECTORIES
             man:file-hierarchy(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● var-lib-containers-storage-overlay-2a4e373ef552ae75c0a78a600062a692ffa0b2f36295b2534f62a67928c16d97-merged.mount - /var/lib/containers/storage/overlay/2a4e373ef552ae75c0a78a600062a692ffa0b2f36295b2534f62a67928c16d97/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:04:36 UTC; 2h 16min ago
      Until: Sat 2026-01-31 07:04:36 UTC; 2h 16min ago
      Where: /var/lib/containers/storage/overlay/2a4e373ef552ae75c0a78a600062a692ffa0b2f36295b2534f62a67928c16d97/merged
       What: overlay

● var-lib-containers-storage-overlay-2ecb06755269fdf102ad8b13f3da1e2498f213b5f64725c7540fb71d05d5b8b4-merged.mount - /var/lib/containers/storage/overlay/2ecb06755269fdf102ad8b13f3da1e2498f213b5f64725c7540fb71d05d5b8b4/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:04:34 UTC; 2h 16min ago
      Until: Sat 2026-01-31 07:04:34 UTC; 2h 16min ago
      Where: /var/lib/containers/storage/overlay/2ecb06755269fdf102ad8b13f3da1e2498f213b5f64725c7540fb71d05d5b8b4/merged
       What: overlay

● var-lib-containers-storage-overlay-40cd00e4c1a40e0446e7d0b8c98b4d2645414e2347ad534128305aa1e29eacd9-merged.mount - /var/lib/containers/storage/overlay/40cd00e4c1a40e0446e7d0b8c98b4d2645414e2347ad534128305aa1e29eacd9/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:08:10 UTC; 2h 13min ago
      Until: Sat 2026-01-31 07:08:10 UTC; 2h 13min ago
      Where: /var/lib/containers/storage/overlay/40cd00e4c1a40e0446e7d0b8c98b4d2645414e2347ad534128305aa1e29eacd9/merged
       What: overlay

● var-lib-containers-storage-overlay-5e4a79a3d665d7d86e7ff3b378ff3c712ce81c902e96f3b9551300c184da98f4-merged.mount - /var/lib/containers/storage/overlay/5e4a79a3d665d7d86e7ff3b378ff3c712ce81c902e96f3b9551300c184da98f4/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:08:20 UTC; 2h 12min ago
      Until: Sat 2026-01-31 07:08:20 UTC; 2h 12min ago
      Where: /var/lib/containers/storage/overlay/5e4a79a3d665d7d86e7ff3b378ff3c712ce81c902e96f3b9551300c184da98f4/merged
       What: overlay

● var-lib-containers-storage-overlay-5f652ce010712b756097ce0a41d3a318e6fd92f4f4b27a3ba59ca6964be76834-merged.mount - /var/lib/containers/storage/overlay/5f652ce010712b756097ce0a41d3a318e6fd92f4f4b27a3ba59ca6964be76834/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:25:39 UTC; 1h 55min ago
      Until: Sat 2026-01-31 07:25:39 UTC; 1h 55min ago
      Where: /var/lib/containers/storage/overlay/5f652ce010712b756097ce0a41d3a318e6fd92f4f4b27a3ba59ca6964be76834/merged
       What: overlay

● var-lib-containers-storage-overlay-6075ef3a0a3c02c5632d39f6e4caa32c65341d00ba4c61213303c3bd86e03446-merged.mount - /var/lib/containers/storage/overlay/6075ef3a0a3c02c5632d39f6e4caa32c65341d00ba4c61213303c3bd86e03446/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:18:06 UTC; 2h 3min ago
      Until: Sat 2026-01-31 07:18:06 UTC; 2h 3min ago
      Where: /var/lib/containers/storage/overlay/6075ef3a0a3c02c5632d39f6e4caa32c65341d00ba4c61213303c3bd86e03446/merged
       What: overlay

● var-lib-containers-storage-overlay-6adc12f2687c18b3cdb07e162f1a88b08c133ab5d12031270cb3d36f3d463d6e-merged.mount - /var/lib/containers/storage/overlay/6adc12f2687c18b3cdb07e162f1a88b08c133ab5d12031270cb3d36f3d463d6e/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:06:28 UTC; 2h 14min ago
      Until: Sat 2026-01-31 07:06:28 UTC; 2h 14min ago
      Where: /var/lib/containers/storage/overlay/6adc12f2687c18b3cdb07e162f1a88b08c133ab5d12031270cb3d36f3d463d6e/merged
       What: overlay

● var-lib-containers-storage-overlay-759648a91cc9b8b1430b048d0f09c22cd75b2ceaa0b020a031640a866cc4ac27-merged.mount - /var/lib/containers/storage/overlay/759648a91cc9b8b1430b048d0f09c22cd75b2ceaa0b020a031640a866cc4ac27/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:08:37 UTC; 2h 12min ago
      Until: Sat 2026-01-31 07:08:37 UTC; 2h 12min ago
      Where: /var/lib/containers/storage/overlay/759648a91cc9b8b1430b048d0f09c22cd75b2ceaa0b020a031640a866cc4ac27/merged
       What: overlay

● var-lib-containers-storage-overlay-a7aa80fa79fb9bf6ae5b0db9455669739fa62501a6080b365e71c3549ee89c80-merged.mount - /var/lib/containers/storage/overlay/a7aa80fa79fb9bf6ae5b0db9455669739fa62501a6080b365e71c3549ee89c80/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:08:13 UTC; 2h 12min ago
      Until: Sat 2026-01-31 07:08:13 UTC; 2h 12min ago
      Where: /var/lib/containers/storage/overlay/a7aa80fa79fb9bf6ae5b0db9455669739fa62501a6080b365e71c3549ee89c80/merged
       What: overlay

● var-lib-containers-storage-overlay-bc320a1049d8f05fd2fcd9b4a54d6d483d4695cc2e0a0929a7bc54470f0fb8a8-merged.mount - /var/lib/containers/storage/overlay/bc320a1049d8f05fd2fcd9b4a54d6d483d4695cc2e0a0929a7bc54470f0fb8a8/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:16:31 UTC; 2h 4min ago
      Until: Sat 2026-01-31 07:16:31 UTC; 2h 4min ago
      Where: /var/lib/containers/storage/overlay/bc320a1049d8f05fd2fcd9b4a54d6d483d4695cc2e0a0929a7bc54470f0fb8a8/merged
       What: overlay

● var-lib-containers-storage-overlay-fb4657c69bf1f7017ddb82052b8e192728196553527cd2185ee2575a7f53c015-merged.mount - /var/lib/containers/storage/overlay/fb4657c69bf1f7017ddb82052b8e192728196553527cd2185ee2575a7f53c015/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:05:42 UTC; 2h 15min ago
      Until: Sat 2026-01-31 07:05:42 UTC; 2h 15min ago
      Where: /var/lib/containers/storage/overlay/fb4657c69bf1f7017ddb82052b8e192728196553527cd2185ee2575a7f53c015/merged
       What: overlay

● var-lib-containers-storage-overlay.mount - /var/lib/containers/storage/overlay
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:04:34 UTC; 2h 16min ago
      Until: Sat 2026-01-31 07:04:34 UTC; 2h 16min ago
      Where: /var/lib/containers/storage/overlay
       What: /dev/vda1

● var-lib-containers-storage-overlay\x2dcontainers-08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:25:39 UTC; 1h 55min ago
      Until: Sat 2026-01-31 07:25:39 UTC; 1h 55min ago
      Where: /var/lib/containers/storage/overlay-containers/08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:16:31 UTC; 2h 4min ago
      Until: Sat 2026-01-31 07:16:31 UTC; 2h 4min ago
      Where: /var/lib/containers/storage/overlay-containers/1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:18:06 UTC; 2h 3min ago
      Until: Sat 2026-01-31 07:18:06 UTC; 2h 3min ago
      Where: /var/lib/containers/storage/overlay-containers/4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1/userdata/shm
       What: shm

○ var-lib-machines.mount - Virtual Machine and Container Storage (Compatibility)
     Loaded: loaded (/usr/lib/systemd/system/var-lib-machines.mount; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 07:22:11 UTC; 1h 59min ago
      Where: /var/lib/machines
       What: /var/lib/machines.raw

Jan 31 07:22:11 compute-0 systemd[1]: Virtual Machine and Container Storage (Compatibility) was skipped because of an unmet condition check (ConditionPathExists=/var/lib/machines.raw).

● var-lib-nfs-rpc_pipefs.mount - RPC Pipe File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Where: /var/lib/nfs/rpc_pipefs
       What: rpc_pipefs

● systemd-ask-password-console.path - Dispatch Password Requests to Console Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.path; static)
     Active: active (waiting) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
   Triggers: ● systemd-ask-password-console.service
       Docs: man:systemd-ask-password-console.path(8)

● systemd-ask-password-wall.path - Forward Password Requests to Wall Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.path; static)
     Active: active (waiting) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
   Triggers: ● systemd-ask-password-wall.service
       Docs: man:systemd-ask-password-wall.path(8)

● init.scope - System and Service Manager
     Loaded: loaded
  Transient: yes
     Active: active (running) since Sat 2026-01-31 06:09:18 UTC; 3h 11min ago
       Docs: man:systemd(1)
         IO: 1.1M read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 48.3M (peak: 66.1M)
        CPU: 1min 17.198s
     CGroup: /init.scope
             └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31

Jan 31 09:21:08 compute-0 systemd[1]: Started libpod-conmon-1e62b43494c4c5430967c461a614951b2b3e7d9dc0da33bf378a592cf365588f.scope.
Jan 31 09:21:08 compute-0 systemd[1]: Started libcrun container.
Jan 31 09:21:08 compute-0 systemd[1]: libpod-1e62b43494c4c5430967c461a614951b2b3e7d9dc0da33bf378a592cf365588f.scope: Deactivated successfully.
Jan 31 09:21:08 compute-0 systemd[1]: var-lib-containers-storage-overlay-fe605de0efbfff7f1e129c9c05273d581a069c6245020dd6d05de659c33a3558-merged.mount: Deactivated successfully.
Jan 31 09:21:09 compute-0 systemd[1]: libpod-conmon-1e62b43494c4c5430967c461a614951b2b3e7d9dc0da33bf378a592cf365588f.scope: Deactivated successfully.
Jan 31 09:21:09 compute-0 systemd[1]: Started libpod-conmon-c91ad40d319ca0959f25542c639fa3531098a9c8d610a5d90837a5e49347f30f.scope.
Jan 31 09:21:09 compute-0 systemd[1]: Started libcrun container.
Jan 31 09:21:10 compute-0 systemd[1]: libpod-c91ad40d319ca0959f25542c639fa3531098a9c8d610a5d90837a5e49347f30f.scope: Deactivated successfully.
Jan 31 09:21:10 compute-0 systemd[1]: var-lib-containers-storage-overlay-2b43700eb7572bebd3472a5ea1bd5ebd36f83d647edcce72c5d2c1452938ca5d-merged.mount: Deactivated successfully.
Jan 31 09:21:11 compute-0 systemd[1]: libpod-conmon-c91ad40d319ca0959f25542c639fa3531098a9c8d610a5d90837a5e49347f30f.scope: Deactivated successfully.

● libpod-08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:25:39 UTC; 1h 55min ago
         IO: 108.0M read, 167.3M written
      Tasks: 29 (limit: 4096)
     Memory: 942.3M (peak: 1006.3M)
        CPU: 7min 46.284s
     CGroup: /machine.slice/libpod-08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f.scope
             └─container
               ├─247706 dumb-init --single-child -- kolla_start
               ├─247708 /usr/bin/python3 /usr/bin/nova-compute
               ├─254428 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmpf8bmbhs4/privsep.sock
               ├─255323 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmpm6uxsdmh/privsep.sock
               └─315733 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmpel5it6fi/privsep.sock

Jan 31 07:25:39 compute-0 systemd[1]: Started libcrun container.

● libpod-1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238.scope.d
             └─dep.conf
     Active: active (running) since Sat 2026-01-31 07:16:31 UTC; 2h 4min ago
         IO: 7.5M read, 4.0K written
      Tasks: 6 (limit: 4096)
     Memory: 22.0M (peak: 27.8M)
        CPU: 28.155s
     CGroup: /machine.slice/libpod-1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238.scope
             └─container
               ├─149459 dumb-init --single-child -- kolla_start
               └─149462 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt

Jan 31 07:16:31 compute-0 systemd[1]: Started libcrun container.

● libpod-4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1.scope.d
             └─dep.conf
     Active: active (running) since Sat 2026-01-31 07:18:08 UTC; 2h 3min ago
         IO: 15.4M read, 78.1M written
      Tasks: 11 (limit: 4096)
     Memory: 445.3M (peak: 506.3M)
        CPU: 2min 8.928s
     CGroup: /machine.slice/libpod-4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1.scope
             └─container
               ├─160023 dumb-init --single-child -- kolla_start
               ├─160028 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─160292 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─160297 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmprrz9sb_2/privsep.sock
               ├─254935 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmptkbyd576/privsep.sock
               └─255113 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmp6f9jilk8/privsep.sock

Jan 31 09:17:08 compute-0 podman[431391]: 2026-01-31 09:17:08.054442216 +0000 UTC m=+0.197695166 container died 620fc80f3173422c302863e47f24236ddc663169cb170190fbfda05830d308cf (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-5c9ca540-57e7-412d-8ef3-af923db0a265, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, tcib_managed=true, org.label-schema.build-date=20260127, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4)
Jan 31 09:17:08 compute-0 podman[431391]: 2026-01-31 09:17:08.71890501 +0000 UTC m=+0.862158000 container cleanup 620fc80f3173422c302863e47f24236ddc663169cb170190fbfda05830d308cf (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-5c9ca540-57e7-412d-8ef3-af923db0a265, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, org.label-schema.build-date=20260127)
Jan 31 09:17:08 compute-0 podman[431449]: 2026-01-31 09:17:08.989968223 +0000 UTC m=+0.251896292 container remove 620fc80f3173422c302863e47f24236ddc663169cb170190fbfda05830d308cf (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-5c9ca540-57e7-412d-8ef3-af923db0a265, org.label-schema.license=GPLv2, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, tcib_managed=true, org.label-schema.build-date=20260127, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0)
Jan 31 09:17:43 compute-0 podman[433193]: 2026-01-31 09:17:43.598812978 +0000 UTC m=+0.028846105 image pull 19964fda6b912d3d57e21b0bcc221725d936e513025030cb508474fe04b06af8 quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified
Jan 31 09:17:44 compute-0 podman[433193]: 2026-01-31 09:17:44.409015955 +0000 UTC m=+0.839049072 container create 769d242170b07fec71176f02c997f29b8b85d704eb75022eb864c5ed99121177 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-5c9ca540-57e7-412d-8ef3-af923db0a265, tcib_managed=true, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, org.label-schema.build-date=20260127)
Jan 31 09:17:45 compute-0 podman[433193]: 2026-01-31 09:17:45.177890455 +0000 UTC m=+1.607923642 container init 769d242170b07fec71176f02c997f29b8b85d704eb75022eb864c5ed99121177 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-5c9ca540-57e7-412d-8ef3-af923db0a265, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, org.label-schema.build-date=20260127, tcib_managed=true, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Jan 31 09:17:45 compute-0 podman[433193]: 2026-01-31 09:17:45.184895734 +0000 UTC m=+1.614928881 container start 769d242170b07fec71176f02c997f29b8b85d704eb75022eb864c5ed99121177 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-5c9ca540-57e7-412d-8ef3-af923db0a265, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.build-date=20260127, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, org.label-schema.schema-version=1.0)
Jan 31 09:19:05 compute-0 podman[434572]: 2026-01-31 09:19:05.477635656 +0000 UTC m=+0.197603523 container died 769d242170b07fec71176f02c997f29b8b85d704eb75022eb864c5ed99121177 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-5c9ca540-57e7-412d-8ef3-af923db0a265, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260127, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4)
Jan 31 09:19:06 compute-0 podman[434572]: 2026-01-31 09:19:06.002316882 +0000 UTC m=+0.722284749 container cleanup 769d242170b07fec71176f02c997f29b8b85d704eb75022eb864c5ed99121177 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-5c9ca540-57e7-412d-8ef3-af923db0a265, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260127, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4)
Jan 31 09:19:06 compute-0 podman[434632]: 2026-01-31 09:19:06.661591251 +0000 UTC m=+0.638569461 container remove 769d242170b07fec71176f02c997f29b8b85d704eb75022eb864c5ed99121177 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-5c9ca540-57e7-412d-8ef3-af923db0a265, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260127, org.label-schema.vendor=CentOS, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4)

● session-1.scope - Session 1 of User zuul
     Loaded: loaded (/run/systemd/transient/session-1.scope; transient)
  Transient: yes
     Active: active (abandoned) since Sat 2026-01-31 06:09:53 UTC; 3h 11min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 17.1M (peak: 38.7M)
        CPU: 1min 14.807s
     CGroup: /user.slice/user-1000.slice/session-1.scope
             └─4520 /usr/bin/python3

Jan 31 06:12:07 np0005603608.novalocal python3[7133]: ansible-ansible.legacy.copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1769839926.9646504-104-231875471216184/source dest=/etc/NetworkManager/system-connections/ci-private-network.nmconnection mode=0600 owner=root group=root follow=False _original_basename=bootstrap-ci-network-nm-connection.nmconnection.j2 checksum=36a5d03fbeb50142f9ad00722ddfc7b68cf493f9 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Jan 31 06:12:07 np0005603608.novalocal sudo[7131]: pam_unix(sudo:session): session closed for user root
Jan 31 06:12:08 np0005603608.novalocal sudo[7181]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-abzbnxvqqjlvdhhrrqqsfnorbbwwtwvh ; OS_CLOUD=vexxhost /usr/bin/python3'
Jan 31 06:12:08 np0005603608.novalocal sudo[7181]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Jan 31 06:12:08 np0005603608.novalocal python3[7183]: ansible-ansible.builtin.systemd Invoked with name=NetworkManager state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Jan 31 06:12:08 np0005603608.novalocal sudo[7181]: pam_unix(sudo:session): session closed for user root
Jan 31 06:12:08 np0005603608.novalocal python3[7249]: ansible-ansible.legacy.command Invoked with _raw_params=ip route zuul_log_id=fa163ef9-e89a-8b99-369b-0000000000bd-0-controller zuul_ansible_split_streams=False _uses_shell=False warn=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Jan 31 06:13:08 np0005603608.novalocal sshd-session[4317]: Received disconnect from 38.102.83.114 port 52370:11: disconnected by user
Jan 31 06:13:08 np0005603608.novalocal sshd-session[4317]: Disconnected from user zuul 38.102.83.114 port 52370
Jan 31 06:13:08 np0005603608.novalocal sshd-session[4304]: pam_unix(sshd:session): session closed for user zuul

● session-21.scope - Session 21 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-21.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:05:25 UTC; 2h 15min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 1.7M)
        CPU: 237ms
     CGroup: /user.slice/user-42477.slice/session-21.scope
             ├─76126 "sshd-session: ceph-admin [priv]"
             └─76148 "sshd-session: ceph-admin"

Jan 31 07:05:25 compute-0 systemd[1]: Started Session 21 of User ceph-admin.

● session-23.scope - Session 23 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-23.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:05:25 UTC; 2h 15min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.3M)
        CPU: 320ms
     CGroup: /user.slice/user-42477.slice/session-23.scope
             ├─76143 "sshd-session: ceph-admin [priv]"
             └─76149 "sshd-session: ceph-admin@notty"

Jan 31 07:05:25 compute-0 systemd[1]: Started Session 23 of User ceph-admin.
Jan 31 07:05:26 compute-0 sudo[76150]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 31 07:05:26 compute-0 sudo[76150]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:05:26 compute-0 sudo[76150]: pam_unix(sudo:session): session closed for user root
Jan 31 07:05:26 compute-0 sudo[76175]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Jan 31 07:05:26 compute-0 sudo[76175]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:05:26 compute-0 sudo[76175]: pam_unix(sudo:session): session closed for user root

● session-24.scope - Session 24 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-24.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:05:26 UTC; 2h 15min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 6.3M)
        CPU: 331ms
     CGroup: /user.slice/user-42477.slice/session-24.scope
             ├─76200 "sshd-session: ceph-admin [priv]"
             └─76203 "sshd-session: ceph-admin@notty"

Jan 31 07:05:26 compute-0 systemd[1]: Started Session 24 of User ceph-admin.
Jan 31 07:05:26 compute-0 sudo[76204]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 31 07:05:26 compute-0 sudo[76204]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:05:26 compute-0 sudo[76204]: pam_unix(sudo:session): session closed for user root
Jan 31 07:05:26 compute-0 sudo[76229]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/f70fcd2a-dcb4-5f89-a4ba-79a09959083b/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 check-host --expect-hostname compute-0
Jan 31 07:05:26 compute-0 sudo[76229]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:05:26 compute-0 sudo[76229]: pam_unix(sudo:session): session closed for user root

● session-25.scope - Session 25 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-25.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:05:26 UTC; 2h 15min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.3M)
        CPU: 347ms
     CGroup: /user.slice/user-42477.slice/session-25.scope
             ├─76254 "sshd-session: ceph-admin [priv]"
             └─76257 "sshd-session: ceph-admin@notty"

Jan 31 07:05:26 compute-0 systemd[1]: Started Session 25 of User ceph-admin.
Jan 31 07:05:26 compute-0 sudo[76258]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 31 07:05:26 compute-0 sudo[76258]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:05:26 compute-0 sudo[76258]: pam_unix(sudo:session): session closed for user root
Jan 31 07:05:26 compute-0 sudo[76283]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /var/lib/ceph/f70fcd2a-dcb4-5f89-a4ba-79a09959083b/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d
Jan 31 07:05:26 compute-0 sudo[76283]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:05:26 compute-0 sudo[76283]: pam_unix(sudo:session): session closed for user root

● session-26.scope - Session 26 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-26.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:05:27 UTC; 2h 15min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.1M)
        CPU: 320ms
     CGroup: /user.slice/user-42477.slice/session-26.scope
             ├─76308 "sshd-session: ceph-admin [priv]"
             └─76311 "sshd-session: ceph-admin@notty"

Jan 31 07:05:27 compute-0 systemd[1]: Started Session 26 of User ceph-admin.
Jan 31 07:05:27 compute-0 sudo[76312]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 31 07:05:27 compute-0 sudo[76312]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:05:27 compute-0 sudo[76312]: pam_unix(sudo:session): session closed for user root
Jan 31 07:05:27 compute-0 sudo[76337]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /var/lib/ceph/f70fcd2a-dcb4-5f89-a4ba-79a09959083b
Jan 31 07:05:27 compute-0 sudo[76337]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:05:27 compute-0 sudo[76337]: pam_unix(sudo:session): session closed for user root

● session-27.scope - Session 27 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-27.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:05:27 UTC; 2h 15min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.4M)
        CPU: 356ms
     CGroup: /user.slice/user-42477.slice/session-27.scope
             ├─76362 "sshd-session: ceph-admin [priv]"
             └─76365 "sshd-session: ceph-admin@notty"

Jan 31 07:05:27 compute-0 systemd[1]: Started Session 27 of User ceph-admin.
Jan 31 07:05:27 compute-0 sudo[76366]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 31 07:05:27 compute-0 sudo[76366]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:05:27 compute-0 sudo[76366]: pam_unix(sudo:session): session closed for user root
Jan 31 07:05:27 compute-0 sudo[76391]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /tmp/cephadm-f70fcd2a-dcb4-5f89-a4ba-79a09959083b/var/lib/ceph/f70fcd2a-dcb4-5f89-a4ba-79a09959083b
Jan 31 07:05:27 compute-0 sudo[76391]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:05:27 compute-0 sudo[76391]: pam_unix(sudo:session): session closed for user root

● session-28.scope - Session 28 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-28.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:05:27 UTC; 2h 15min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.2M)
        CPU: 375ms
     CGroup: /user.slice/user-42477.slice/session-28.scope
             ├─76416 "sshd-session: ceph-admin [priv]"
             └─76419 "sshd-session: ceph-admin@notty"

Jan 31 07:05:27 compute-0 systemd[1]: Started Session 28 of User ceph-admin.
Jan 31 07:05:28 compute-0 sudo[76420]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 31 07:05:28 compute-0 sudo[76420]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:05:28 compute-0 sudo[76420]: pam_unix(sudo:session): session closed for user root
Jan 31 07:05:28 compute-0 sudo[76445]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/touch /tmp/cephadm-f70fcd2a-dcb4-5f89-a4ba-79a09959083b/var/lib/ceph/f70fcd2a-dcb4-5f89-a4ba-79a09959083b/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new
Jan 31 07:05:28 compute-0 sudo[76445]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:05:28 compute-0 sudo[76445]: pam_unix(sudo:session): session closed for user root

● session-29.scope - Session 29 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-29.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:05:28 UTC; 2h 15min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.5M)
        CPU: 319ms
     CGroup: /user.slice/user-42477.slice/session-29.scope
             ├─76470 "sshd-session: ceph-admin [priv]"
             └─76473 "sshd-session: ceph-admin@notty"

Jan 31 07:05:28 compute-0 systemd[1]: Started Session 29 of User ceph-admin.
Jan 31 07:05:28 compute-0 sudo[76474]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 31 07:05:28 compute-0 sudo[76474]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:05:28 compute-0 sudo[76474]: pam_unix(sudo:session): session closed for user root
Jan 31 07:05:28 compute-0 sudo[76499]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chown -R ceph-admin /tmp/cephadm-f70fcd2a-dcb4-5f89-a4ba-79a09959083b
Jan 31 07:05:28 compute-0 sudo[76499]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:05:28 compute-0 sudo[76499]: pam_unix(sudo:session): session closed for user root

● session-30.scope - Session 30 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-30.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:05:28 UTC; 2h 15min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.3M)
        CPU: 377ms
     CGroup: /user.slice/user-42477.slice/session-30.scope
             ├─76524 "sshd-session: ceph-admin [priv]"
             └─76527 "sshd-session: ceph-admin@notty"

Jan 31 07:05:28 compute-0 systemd[1]: Started Session 30 of User ceph-admin.
Jan 31 07:05:28 compute-0 sudo[76528]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 31 07:05:28 compute-0 sudo[76528]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:05:28 compute-0 sudo[76528]: pam_unix(sudo:session): session closed for user root
Jan 31 07:05:28 compute-0 sudo[76553]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chmod 644 /tmp/cephadm-f70fcd2a-dcb4-5f89-a4ba-79a09959083b/var/lib/ceph/f70fcd2a-dcb4-5f89-a4ba-79a09959083b/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new
Jan 31 07:05:28 compute-0 sudo[76553]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:05:28 compute-0 sudo[76553]: pam_unix(sudo:session): session closed for user root

● session-31.scope - Session 31 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-31.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:05:29 UTC; 2h 15min ago
         IO: 0B read, 216.0K written
      Tasks: 2
     Memory: 1.5M (peak: 3.5M)
        CPU: 343ms
     CGroup: /user.slice/user-42477.slice/session-31.scope
             ├─76578 "sshd-session: ceph-admin [priv]"
             └─76581 "sshd-session: ceph-admin@notty"

Jan 31 07:05:29 compute-0 systemd[1]: Started Session 31 of User ceph-admin.

● session-32.scope - Session 32 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-32.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:05:29 UTC; 2h 15min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.5M)
        CPU: 334ms
     CGroup: /user.slice/user-42477.slice/session-32.scope
             ├─76605 "sshd-session: ceph-admin [priv]"
             └─76608 "sshd-session: ceph-admin@notty"

Jan 31 07:05:29 compute-0 systemd[1]: Started Session 32 of User ceph-admin.
Jan 31 07:05:29 compute-0 sudo[76609]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 31 07:05:29 compute-0 sudo[76609]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:05:29 compute-0 sudo[76609]: pam_unix(sudo:session): session closed for user root
Jan 31 07:05:29 compute-0 sudo[76634]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mv /tmp/cephadm-f70fcd2a-dcb4-5f89-a4ba-79a09959083b/var/lib/ceph/f70fcd2a-dcb4-5f89-a4ba-79a09959083b/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new /var/lib/ceph/f70fcd2a-dcb4-5f89-a4ba-79a09959083b/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d
Jan 31 07:05:29 compute-0 sudo[76634]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:05:29 compute-0 sudo[76634]: pam_unix(sudo:session): session closed for user root

● session-33.scope - Session 33 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-33.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:05:30 UTC; 2h 15min ago
         IO: 7.9M read, 663.5M written
      Tasks: 2
     Memory: 218.2M (peak: 448.2M)
        CPU: 9min 35.648s
     CGroup: /user.slice/user-42477.slice/session-33.scope
             ├─76659 "sshd-session: ceph-admin [priv]"
             └─76662 "sshd-session: ceph-admin@notty"

Jan 31 09:21:12 compute-0 sudo[442762]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 09:21:12 compute-0 sudo[442732]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 09:21:12 compute-0 sudo[442762]: pam_unix(sudo:session): session closed for user root
Jan 31 09:21:12 compute-0 sudo[442732]: pam_unix(sudo:session): session closed for user root
Jan 31 09:21:13 compute-0 sudo[442811]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Jan 31 09:21:13 compute-0 sudo[442811]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 09:21:13 compute-0 sudo[442811]: pam_unix(sudo:session): session closed for user root
Jan 31 09:21:13 compute-0 sudo[442810]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 31 09:21:13 compute-0 sudo[442810]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 09:21:13 compute-0 sudo[442810]: pam_unix(sudo:session): session closed for user root

● session-67.scope - Session 67 of User zuul
     Loaded: loaded (/run/systemd/transient/session-67.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 09:20:34 UTC; 39s ago
         IO: 3.5M read, 383.2M written
      Tasks: 31
     Memory: 661.7M (peak: 688.4M)
        CPU: 1min 49.621s
     CGroup: /user.slice/user-1000.slice/session-67.scope
             ├─435980 "sshd-session: zuul [priv]"
             ├─435983 "sshd-session: zuul@notty"
             ├─435984 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             ├─436008 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             ├─442195 timeout 15s turbostat --debug sleep 10
             ├─442899 timeout 300s ceph fs dump --format json-pretty
             ├─442900 /usr/bin/python3 -s /usr/bin/ceph fs dump --format json-pretty
             ├─442903 timeout 300s systemctl status --all
             ├─442904 systemctl status --all
             ├─442905 timeout 300s semanage interface -l
             └─442906 /usr/bin/python3 -EsI /usr/sbin/semanage interface -l

Jan 31 09:20:34 compute-0 systemd[1]: Started Session 67 of User zuul.
Jan 31 09:20:34 compute-0 sudo[435984]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Jan 31 09:20:34 compute-0 sudo[435984]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Jan 31 09:20:39 compute-0 ovs-vsctl[436342]: ovs|00001|db_ctl_base|ERR|no key "dpdk-init" in Open_vSwitch record "." column other_config
Jan 31 09:20:49 compute-0 crontab[437918]: (root) LIST (root)
Jan 31 09:21:05 compute-0 ovs-appctl[441233]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Jan 31 09:21:05 compute-0 ovs-appctl[441238]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Jan 31 09:21:06 compute-0 ovs-appctl[441243]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

○ 1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238-518afe6e7402b380.service - /usr/bin/podman healthcheck run 1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238
     Loaded: loaded (/run/systemd/transient/1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238-518afe6e7402b380.service; transient)
  Transient: yes
     Active: inactive (dead) since Sat 2026-01-31 09:21:07 UTC; 5s ago
   Duration: 116ms
TriggeredBy: ● 1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238-518afe6e7402b380.timer
    Process: 441974 ExecStart=/usr/bin/podman healthcheck run 1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238 (code=exited, status=0/SUCCESS)
   Main PID: 441974 (code=exited, status=0/SUCCESS)
        CPU: 77ms

Jan 31 09:21:07 compute-0 podman[441974]: 2026-01-31 09:21:07.926880046 +0000 UTC m=+0.097249084 container health_status 1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_controller, io.buildah.version=1.41.3, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '93c1edad6c3ce19ccbf4cad1c823140b960799b036165432d2a9b50972fa7d6a-1ebd21696fff3d9ce9d1c627d87eb768e7a7895873c4ad726f2d4c0751d2120c-1ebd21696fff3d9ce9d1c627d87eb768e7a7895873c4ad726f2d4c0751d2120c-1ebd21696fff3d9ce9d1c627d87eb768e7a7895873c4ad726f2d4c0751d2120c'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, managed_by=edpm_ansible, 
org.label-schema.build-date=20260127, org.label-schema.license=GPLv2)

○ 4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1-61765080bc59b28e.service - /usr/bin/podman healthcheck run 4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1
     Loaded: loaded (/run/systemd/transient/4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1-61765080bc59b28e.service; transient)
  Transient: yes
     Active: inactive (dead) since Sat 2026-01-31 09:20:54 UTC; 18s ago
   Duration: 87ms
TriggeredBy: ● 4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1-61765080bc59b28e.timer
    Process: 438802 ExecStart=/usr/bin/podman healthcheck run 4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1 (code=exited, status=0/SUCCESS)
   Main PID: 438802 (code=exited, status=0/SUCCESS)
        CPU: 93ms

Jan 31 09:20:54 compute-0 podman[438802]: 2026-01-31 09:20:54.899933068 +0000 UTC m=+0.062039666 container health_status 4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '93c1edad6c3ce19ccbf4cad1c823140b960799b036165432d2a9b50972fa7d6a-1ebd21696fff3d9ce9d1c627d87eb768e7a7895873c4ad726f2d4c0751d2120c-1ebd21696fff3d9ce9d1c627d87eb768e7a7895873c4ad726f2d4c0751d2120c-1ebd21696fff3d9ce9d1c627d87eb768e7a7895873c4ad726f2d4c0751d2120c-0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/openstack/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/Unit apparmor.service could not be found.
Unit apt-daily.service could not be found.
Unit auto-cpufreq.service could not be found.
Unit autofs.service could not be found.
kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20260127, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image)

● auditd.service - Security Auditing Service
     Loaded: loaded (/usr/lib/systemd/system/auditd.service; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
       Docs: man:auditd(8)
             https://github.com/linux-audit/audit-documentation
   Main PID: 699 (auditd)
         IO: 0B read, 43.9M written
      Tasks: 4 (limit: 48560)
     Memory: 25.9M (peak: 26.4M)
        CPU: 9.849s
     CGroup: /system.slice/auditd.service
             ├─699 /sbin/auditd
             └─701 /usr/sbin/sedispatch

Jan 31 06:09:24 localhost augenrules[719]: pid 699
Jan 31 06:09:24 localhost augenrules[719]: rate_limit 0
Jan 31 06:09:24 localhost augenrules[719]: backlog_limit 8192
Jan 31 06:09:24 localhost augenrules[719]: lost 0
Jan 31 06:09:24 localhost augenrules[719]: backlog 0
Jan 31 06:09:24 localhost augenrules[719]: backlog_wait_time 60000
Jan 31 06:09:24 localhost augenrules[719]: backlog_wait_time_actual 0
Jan 31 06:09:24 localhost systemd[1]: Started Security Auditing Service.
Jan 31 07:19:34 compute-0 auditd[699]: Audit daemon rotating log files
Jan 31 08:18:27 compute-0 auditd[699]: Audit daemon rotating log files

○ auth-rpcgss-module.service - Kernel Module supporting RPCSEC_GSS
     Loaded: loaded (/usr/lib/systemd/system/auth-rpcgss-module.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:09:23 UTC; 3h 11min ago

○ blk-availability.service - Availability of block devices
     Loaded: loaded (/usr/lib/systemd/system/blk-availability.service; disabled; preset: disabled)
     Active: inactive (dead)

● ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@crash.compute-0.service - Ceph crash.compute-0 for f70fcd2a-dcb4-5f89-a4ba-79a09959083b
     Loaded: loaded (/etc/systemd/system/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:05:42 UTC; 2h 15min ago
   Main PID: 81645 (conmon)
         IO: 0B read, 1.1M written
      Tasks: 3 (limit: 48560)
     Memory: 12.2M (peak: 33.7M)
        CPU: 803ms
     CGroup: /system.slice/system-ceph\x2df70fcd2a\x2ddcb4\x2d5f89\x2da4ba\x2d79a09959083b.slice/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@crash.compute-0.service
             ├─libpod-payload-1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976
             │ ├─81648 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ └─81650 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             └─runtime
               └─81645 /usr/bin/conmon --api-version 1 -c 1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976 -u 1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976/userdata -p /run/containers/storage/overlay-containers/1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976

Jan 31 07:45:43 compute-0 ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-crash-compute-0[81645]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Jan 31 07:55:43 compute-0 ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-crash-compute-0[81645]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Jan 31 08:05:43 compute-0 ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-crash-compute-0[81645]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Jan 31 08:15:43 compute-0 ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-crash-compute-0[81645]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Jan 31 08:25:44 compute-0 ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-crash-compute-0[81645]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Jan 31 08:35:44 compute-0 ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-crash-compute-0[81645]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Jan 31 08:45:44 compute-0 ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-crash-compute-0[81645]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Jan 31 08:55:44 compute-0 ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-crash-compute-0[81645]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Jan 31 09:05:44 compute-0 ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-crash-compute-0[81645]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Jan 31 09:15:44 compute-0 ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-crash-compute-0[81645]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'

● ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@haproxy.rgw.default.compute-0.cwtxbj.service - Ceph haproxy.rgw.default.compute-0.cwtxbj for f70fcd2a-dcb4-5f89-a4ba-79a09959083b
     Loaded: loaded (/etc/systemd/system/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:08:21 UTC; 2h 12min ago
   Main PID: 95975 (conmon)
         IO: 4.1M read, 162.5K written
      Tasks: 11 (limit: 48560)
     Memory: 10.1M (peak: 22.7M)
        CPU: 12.911s
     CGroup: /system.slice/system-ceph\x2df70fcd2a\x2ddcb4\x2d5f89\x2da4ba\x2d79a09959083b.slice/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@haproxy.rgw.default.compute-0.cwtxbj.service
             ├─libpod-payload-b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3
             │ ├─95977 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ ├─95985 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ └─95987 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             └─runtime
               └─95975 /usr/bin/conmon --api-version 1 -c b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3 -u b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3/userdata -p /run/containers/storage/overlay-containers/b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-haproxy-rgw-default-compute-0-cwtxbj --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@haproxy.rgw.default.compute-0.cwtxbj.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3

Jan 31 07:08:20 compute-0 systemd[1]: Starting Ceph haproxy.rgw.default.compute-0.cwtxbj for f70fcd2a-dcb4-5f89-a4ba-79a09959083b...
Jan 31 07:08:20 compute-0 podman[95948]: 2026-01-31 07:08:20.710378454 +0000 UTC m=+0.121575978 container create b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3 (image=quay.io/ceph/haproxy:2.3, name=ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-haproxy-rgw-default-compute-0-cwtxbj)
Jan 31 07:08:20 compute-0 podman[95948]: 2026-01-31 07:08:20.624452641 +0000 UTC m=+0.035650195 image pull e85424b0d443f37ddd2dd8a3bb2ef6f18dd352b987723a921b64289023af2914 quay.io/ceph/haproxy:2.3
Jan 31 07:08:20 compute-0 podman[95948]: 2026-01-31 07:08:20.837747368 +0000 UTC m=+0.248944922 container init b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3 (image=quay.io/ceph/haproxy:2.3, name=ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-haproxy-rgw-default-compute-0-cwtxbj)
Jan 31 07:08:20 compute-0 podman[95948]: 2026-01-31 07:08:20.846194904 +0000 UTC m=+0.257392478 container start b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3 (image=quay.io/ceph/haproxy:2.3, name=ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-haproxy-rgw-default-compute-0-cwtxbj)
Jan 31 07:08:20 compute-0 ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-haproxy-rgw-default-compute-0-cwtxbj[95975]: [NOTICE] 030/070820 (2) : New worker #1 (4) forked
Jan 31 07:08:21 compute-0 bash[95948]: b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3
Jan 31 07:08:21 compute-0 systemd[1]: Started Ceph haproxy.rgw.default.compute-0.cwtxbj for f70fcd2a-dcb4-5f89-a4ba-79a09959083b.

● ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@keepalived.rgw.default.compute-0.rwjfwq.service - Ceph keepalived.rgw.default.compute-0.rwjfwq for f70fcd2a-dcb4-5f89-a4ba-79a09959083b
     Loaded: loaded (/etc/systemd/system/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:08:37 UTC; 2h 12min ago
   Main PID: 96432 (conmon)
         IO: 0B read, 188.5K written
      Tasks: 4 (limit: 48560)
     Memory: 2.9M (peak: 20.2M)
        CPU: 33.334s
     CGroup: /system.slice/system-ceph\x2df70fcd2a\x2ddcb4\x2d5f89\x2da4ba\x2d79a09959083b.slice/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@keepalived.rgw.default.compute-0.rwjfwq.service
             ├─libpod-payload-37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51
             │ ├─96434 /run/podman-init -- ./init.sh
             │ ├─96436 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ └─96438 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             └─runtime
               └─96432 /usr/bin/conmon --api-version 1 -c 37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51 -u 37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51/userdata -p /run/containers/storage/overlay-containers/37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-keepalived-rgw-default-compute-0-rwjfwq --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@keepalived.rgw.default.compute-0.rwjfwq.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51

Jan 31 07:08:37 compute-0 ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-keepalived-rgw-default-compute-0-rwjfwq[96432]: Sat Jan 31 07:08:37 2026: Running on Linux 5.14.0-665.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Jan 22 12:30:22 UTC 2026 (built for Linux 5.14.0)
Jan 31 07:08:37 compute-0 ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-keepalived-rgw-default-compute-0-rwjfwq[96432]: Sat Jan 31 07:08:37 2026: Command line: '/usr/sbin/keepalived' '-n' '-l' '-f' '/etc/keepalived/keepalived.conf'
Jan 31 07:08:37 compute-0 ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-keepalived-rgw-default-compute-0-rwjfwq[96432]: Sat Jan 31 07:08:37 2026: Configuration file /etc/keepalived/keepalived.conf
Jan 31 07:08:37 compute-0 ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-keepalived-rgw-default-compute-0-rwjfwq[96432]: Sat Jan 31 07:08:37 2026: NOTICE: setting config option max_auto_priority should result in better keepalived performance
Jan 31 07:08:37 compute-0 ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-keepalived-rgw-default-compute-0-rwjfwq[96432]: Sat Jan 31 07:08:37 2026: Starting VRRP child process, pid=4
Jan 31 07:08:37 compute-0 ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-keepalived-rgw-default-compute-0-rwjfwq[96432]: Sat Jan 31 07:08:37 2026: Startup complete
Jan 31 07:08:37 compute-0 ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-keepalived-rgw-default-compute-0-rwjfwq[96432]: Sat Jan 31 07:08:37 2026: (VI_0) Entering BACKUP STATE (init)
Jan 31 07:08:37 compute-0 ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-keepalived-rgw-default-compute-0-rwjfwq[96432]: Sat Jan 31 07:08:37 2026: VRRP_Script(check_backend) succeeded
Jan 31 07:08:40 compute-0 ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-keepalived-rgw-default-compute-0-rwjfwq[96432]: Sat Jan 31 07:08:40 2026: (VI_0) Entering MASTER STATE
Jan 31 09:18:39 compute-0 ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-keepalived-rgw-default-compute-0-rwjfwq[96432]: Sat Jan 31 09:18:39 2026: A thread timer expired 1.250529 seconds ago

● ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mds.cephfs.compute-0.voybui.service - Ceph mds.cephfs.compute-0.voybui for f70fcd2a-dcb4-5f89-a4ba-79a09959083b
     Loaded: loaded (/etc/systemd/system/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:08:13 UTC; 2h 12min ago
   Main PID: 94760 (conmon)
         IO: 0B read, 212.0K written
      Tasks: 18 (limit: 48560)
     Memory: 26.8M (peak: 28.7M)
        CPU: 4.171s
     CGroup: /system.slice/system-ceph\x2df70fcd2a\x2ddcb4\x2d5f89\x2da4ba\x2d79a09959083b.slice/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mds.cephfs.compute-0.voybui.service
             ├─libpod-payload-b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd
             │ ├─94764 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.voybui -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─94769 /usr/bin/ceph-mds -n mds.cephfs.compute-0.voybui -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─94760 /usr/bin/conmon --api-version 1 -c b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd -u b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd/userdata -p /run/containers/storage/overlay-containers/b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-mds-cephfs-compute-0-voybui --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mds.cephfs.compute-0.voybui.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd

Jan 31 09:20:42 compute-0 ceph-mds[94769]: mds.cephfs.compute-0.voybui Can't run that command on an inactive MDS!
Jan 31 09:20:42 compute-0 ceph-mds[94769]: mds.cephfs.compute-0.voybui asok_command: dump_ops_in_flight {prefix=dump_ops_in_flight} (starting...)
Jan 31 09:20:42 compute-0 ceph-mds[94769]: mds.cephfs.compute-0.voybui Can't run that command on an inactive MDS!
Jan 31 09:20:43 compute-0 ceph-mds[94769]: mds.cephfs.compute-0.voybui asok_command: get subtrees {prefix=get subtrees} (starting...)
Jan 31 09:20:43 compute-0 ceph-mds[94769]: mds.cephfs.compute-0.voybui Can't run that command on an inactive MDS!
Jan 31 09:20:43 compute-0 ceph-mds[94769]: mds.cephfs.compute-0.voybui asok_command: ops {prefix=ops} (starting...)
Jan 31 09:20:43 compute-0 ceph-mds[94769]: mds.cephfs.compute-0.voybui Can't run that command on an inactive MDS!
Jan 31 09:20:43 compute-0 ceph-mds[94769]: mds.cephfs.compute-0.voybui asok_command: session ls {prefix=session ls} (starting...)
Jan 31 09:20:43 compute-0 ceph-mds[94769]: mds.cephfs.compute-0.voybui Can't run that command on an inactive MDS!
Jan 31 09:20:44 compute-0 ceph-mds[94769]: mds.cephfs.compute-0.voybui asok_command: status {prefix=status} (starting...)

● ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mgr.compute-0.hhuoua.service - Ceph mgr.compute-0.hhuoua for f70fcd2a-dcb4-5f89-a4ba-79a09959083b
     Loaded: loaded (/etc/systemd/system/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:04:36 UTC; 2h 16min ago
   Main PID: 74787 (conmon)
         IO: 964.0K read, 7.4M written
      Tasks: 149 (limit: 48560)
     Memory: 561.5M (peak: 562.6M)
        CPU: 3min 45.908s
     CGroup: /system.slice/system-ceph\x2df70fcd2a\x2ddcb4\x2d5f89\x2da4ba\x2d79a09959083b.slice/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mgr.compute-0.hhuoua.service
             ├─libpod-payload-80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26
             │ ├─74789 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.hhuoua -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─74791 /usr/bin/ceph-mgr -n mgr.compute-0.hhuoua -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─74787 /usr/bin/conmon --api-version 1 -c 80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26 -u 80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26/userdata -p /run/containers/storage/overlay-containers/80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-mgr-compute-0-hhuoua --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mgr.compute-0.hhuoua.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26

Jan 31 09:21:10 compute-0 ceph-mgr[74791]: log_channel(cluster) log [DBG] : pgmap v4498: 305 pgs: 305 active+clean; 120 MiB data, 1.6 GiB used, 19 GiB / 21 GiB avail
Jan 31 09:21:10 compute-0 ceph-mgr[74791]: log_channel(audit) log [DBG] : from='client.39849 -' entity='client.admin' cmd=[{"prefix": "pg stat", "target": ["mon-mgr", ""]}]: dispatch
Jan 31 09:21:12 compute-0 ceph-mgr[74791]: log_channel(audit) log [DBG] : from='client.48745 -' entity='client.admin' cmd=[{"prefix": "fs status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Jan 31 09:21:12 compute-0 ceph-mgr[74791]: log_channel(audit) log [DBG] : from='client.54032 -' entity='client.admin' cmd=[{"prefix": "device ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Jan 31 09:21:12 compute-0 ceph-mgr[74791]: log_channel(cluster) log [DBG] : pgmap v4499: 305 pgs: 305 active+clean; 120 MiB data, 1.6 GiB used, 19 GiB / 21 GiB avail
Jan 31 09:21:12 compute-0 ceph-mgr[74791]: [progress WARNING root] complete: ev fc721ea0-5b11-41f2-8bfb-039d538c426d does not exist
Jan 31 09:21:12 compute-0 ceph-mgr[74791]: [progress WARNING root] complete: ev 6f380943-a998-42e6-aa3d-d2cb5732e035 does not exist
Jan 31 09:21:12 compute-0 ceph-mgr[74791]: [progress WARNING root] complete: ev 197ea0a5-ea1e-4048-8bab-e5e389bed086 does not exist
Jan 31 09:21:12 compute-0 ceph-mgr[74791]: log_channel(audit) log [DBG] : from='client.39885 -' entity='client.admin' cmd=[{"prefix": "device ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Jan 31 09:21:13 compute-0 ceph-mgr[74791]: log_channel(audit) log [DBG] : from='client.48769 -' entity='client.admin' cmd=[{"prefix": "osd blocked-by", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch

● ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mon.compute-0.service - Ceph mon.compute-0 for f70fcd2a-dcb4-5f89-a4ba-79a09959083b
     Loaded: loaded (/etc/systemd/system/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:04:34 UTC; 2h 16min ago
   Main PID: 74492 (conmon)
         IO: 1.8M read, 1.2G written
      Tasks: 27 (limit: 48560)
     Memory: 184.4M (peak: 198.9M)
        CPU: 2min 49.675s
     CGroup: /system.slice/system-ceph\x2df70fcd2a\x2ddcb4\x2d5f89\x2da4ba\x2d79a09959083b.slice/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mon.compute-0.service
             ├─libpod-payload-c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7
             │ ├─74494 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─74496 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             └─runtime
               └─74492 /usr/bin/conmon --api-version 1 -c c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7 -u c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7/userdata -p /run/containers/storage/overlay-containers/c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7

Jan 31 09:21:13 compute-0 ceph-mon[74496]: pgmap v4499: 305 pgs: 305 active+clean; 120 MiB data, 1.6 GiB used, 19 GiB / 21 GiB avail
Jan 31 09:21:13 compute-0 ceph-mon[74496]: from='mgr.14132 192.168.122.100:0/828660362' entity='mgr.compute-0.hhuoua' 
Jan 31 09:21:13 compute-0 ceph-mon[74496]: from='client.? 192.168.122.102:0/2850281673' entity='client.admin' cmd=[{"prefix": "mds stat", "format": "json-pretty"}]: dispatch
Jan 31 09:21:13 compute-0 ceph-mon[74496]: from='client.39885 -' entity='client.admin' cmd=[{"prefix": "device ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Jan 31 09:21:13 compute-0 ceph-mon[74496]: from='client.? 192.168.122.101:0/1191317902' entity='client.admin' cmd=[{"prefix": "df", "detail": "detail", "format": "json-pretty"}]: dispatch
Jan 31 09:21:13 compute-0 ceph-mon[74496]: from='client.? 192.168.122.102:0/258564218' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json-pretty"}]: dispatch
Jan 31 09:21:13 compute-0 ceph-mon[74496]: from='client.? 192.168.122.100:0/893647686' entity='client.admin' cmd=[{"prefix": "df", "detail": "detail", "format": "json-pretty"}]: dispatch
Jan 31 09:21:13 compute-0 ceph-mon[74496]: from='client.? 192.168.122.101:0/3564389048' entity='client.admin' cmd=[{"prefix": "df", "format": "json-pretty"}]: dispatch
Jan 31 09:21:13 compute-0 ceph-mon[74496]: from='client.? 192.168.122.100:0/1772620264' entity='client.admin' cmd=[{"prefix": "df", "format": "json-pretty"}]: dispatch
Jan 31 09:21:13 compute-0 ceph-mon[74496]: from='client.? 192.168.122.101:0/2538997185' entity='client.admin' cmd=[{"prefix": "fs dump", "format": "json-pretty"}]: dispatch

● ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@osd.0.service - Ceph osd.0 for f70fcd2a-dcb4-5f89-a4ba-79a09959083b
     Loaded: loaded (/etc/systemd/system/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:06:28 UTC; 2h 14min ago
   Main PID: 84812 (conmon)
         IO: 3.4G read, 11.0G written
      Tasks: 60 (limit: 48560)
     Memory: 1.4G (peak: 1.6G)
        CPU: 2min 25.885s
     CGroup: /system.slice/system-ceph\x2df70fcd2a\x2ddcb4\x2d5f89\x2da4ba\x2d79a09959083b.slice/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@osd.0.service
             ├─libpod-payload-7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630
             │ ├─84814 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─84816 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─84812 /usr/bin/conmon --api-version 1 -c 7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630 -u 7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630/userdata -p /run/containers/storage/overlay-containers/7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630

Jan 31 09:20:47 compute-0 ceph-osd[84816]: prioritycache tune_memory target: 4294967296 mapped: 425033728 unmapped: 50585600 heap: 475619328 old mem: 2845415832 new mem: 2845415832
Jan 31 09:20:47 compute-0 ceph-osd[84816]: osd.0 432 heartbeat osd_stat(store_statfs(0x1a2b36000/0x0/0x1bfc00000, data 0x2452f1f/0x2688000, compress 0x0/0x0/0x0, omap 0x639, meta 0x1aa3f9c7), peers [1,2] op hist [])
Jan 31 09:20:47 compute-0 ceph-osd[84816]: monclient: tick
Jan 31 09:20:47 compute-0 ceph-osd[84816]: monclient: _check_auth_tickets
Jan 31 09:20:47 compute-0 ceph-osd[84816]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-01-31T09:20:15.831479+0000)
Jan 31 09:20:47 compute-0 ceph-osd[84816]: prioritycache tune_memory target: 4294967296 mapped: 425025536 unmapped: 50593792 heap: 475619328 old mem: 2845415832 new mem: 2845415832
Jan 31 09:20:47 compute-0 ceph-osd[84816]: monclient: tick
Jan 31 09:20:47 compute-0 ceph-osd[84816]: monclient: _check_auth_tickets
Jan 31 09:20:47 compute-0 ceph-osd[84816]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-01-31T09:20:16.831619+0000)
Jan 31 09:20:47 compute-0 ceph-osd[84816]: do_command 'log dump' '{prefix=log dump}'

● ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@rgw.rgw.compute-0.njduba.service - Ceph rgw.rgw.compute-0.njduba for f70fcd2a-dcb4-5f89-a4ba-79a09959083b
     Loaded: loaded (/etc/systemd/system/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:08:10 UTC; 2h 13min ago
   Main PID: 94235 (conmon)
         IO: 2.1M read, 8.6M written
      Tasks: 605 (limit: 48560)
     Memory: 125.2M (peak: 126.2M)
        CPU: 1min 7.057s
     CGroup: /system.slice/system-ceph\x2df70fcd2a\x2ddcb4\x2d5f89\x2da4ba\x2d79a09959083b.slice/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@rgw.rgw.compute-0.njduba.service
             ├─libpod-payload-7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595
             │ ├─94237 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.njduba -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─94239 /usr/bin/radosgw -n client.rgw.rgw.compute-0.njduba -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─94235 /usr/bin/conmon --api-version 1 -c 7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595 -u 7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595/userdata -p /run/containers/storage/overlay-containers/7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-rgw-rgw-compute-0-njduba --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@rgw.rgw.compute-0.njduba.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595

Jan 31 09:21:10 compute-0 radosgw[94239]: beast: 0x7fdbf7a9a6f0: 192.168.122.102 - anonymous [31/Jan/2026:09:21:10.008 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.000000000s
Jan 31 09:21:11 compute-0 radosgw[94239]: ====== starting new request req=0x7fdbf7a9a6f0 =====
Jan 31 09:21:11 compute-0 radosgw[94239]: ====== req done req=0x7fdbf7a9a6f0 op status=0 http_status=200 latency=0.000000000s ======
Jan 31 09:21:11 compute-0 radosgw[94239]: beast: 0x7fdbf7a9a6f0: 192.168.122.100 - anonymous [31/Jan/2026:09:21:11.598 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.000000000s
Jan 31 09:21:12 compute-0 radosgw[94239]: ====== starting new request req=0x7fdbf7a9a6f0 =====
Jan 31 09:21:12 compute-0 radosgw[94239]: ====== req done req=0x7fdbf7a9a6f0 op status=0 http_status=200 latency=0.002000047s ======
Jan 31 09:21:12 compute-0 radosgw[94239]: beast: 0x7fdbf7a9a6f0: 192.168.122.102 - anonymous [31/Jan/2026:09:21:12.012 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.002000047s
Jan 31 09:21:13 compute-0 radosgw[94239]: ====== starting new request req=0x7fdbf7a9a6f0 =====
Jan 31 09:21:13 compute-0 radosgw[94239]: ====== req done req=0x7fdbf7a9a6f0 op status=0 http_status=200 latency=0.001000024s ======
Jan 31 09:21:13 compute-0 radosgw[94239]: beast: 0x7fdbf7a9a6f0: 192.168.122.100 - anonymous [31/Jan/2026:09:21:13.600 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.001000024s

● ceph-osd-losetup-0.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-0.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2026-01-31 07:04:00 UTC; 2h 17min ago
   Main PID: 72648 (code=exited, status=0/SUCCESS)
        CPU: 19ms

Jan 31 07:04:00 compute-0 systemd[1]: Starting Ceph OSD losetup...
Jan 31 07:04:00 compute-0 bash[72649]: /dev/loop3: [64513]:4355757 (/var/lib/ceph-osd-0.img)
Jan 31 07:04:00 compute-0 systemd[1]: Finished Ceph OSD losetup.

● chronyd.service - NTP client/server
     Loaded: loaded (/usr/lib/systemd/system/chronyd.service; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 07:02:01 UTC; 2h 19min ago
       Docs: man:chronyd(8)
             man:chrony.conf(5)
   Main PID: 58682 (chronyd)
         IO: 0B read, 4.0K written
      Tasks: 1 (limit: 48560)
     Memory: 1.0M (peak: 2.0M)
        CPU: 92ms
     CGroup: /system.slice/chronyd.service
             └─58682 /usr/sbin/chronyd -F 2

Jan 31 07:02:01 compute-0 systemd[1]: Starting NTP client/server...
Jan 31 07:02:01 compute-0 chronyd[58682]: chronyd version 4.8 starting (+CMDMON +REFCLOCK +RTC +PRIVDROP +SCFILTER +SIGND +NTS +SECHASH +IPV6 +DEBUG)
Jan 31 07:02:01 compute-0 chronyd[58682]: Frequency -23.865 +/- 0.194 ppm read from /var/lib/chrony/drift
Jan 31 07:02:01 compute-0 chronyd[58682]: Loaded seccomp filter (level 2)
Jan 31 07:02:01 compute-0 systemd[1]: Started NTP client/server.
Jan 31 07:04:11 compute-0 chronyd[58682]: Selected source 23.133.168.244 (pool.ntp.org)

● cloud-config.service - Cloud-init: Config Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2026-01-31 06:09:35 UTC; 3h 11min ago
   Main PID: 1002 (code=exited, status=0/SUCCESS)
        CPU: 425ms

Jan 31 06:09:34 np0005603608.novalocal systemd[1]: Starting Cloud-init: Config Stage...
Jan 31 06:09:35 np0005603608.novalocal cloud-init[1083]: Cloud-init v. 24.4-8.el9 running 'modules:config' at Sat, 31 Jan 2026 06:09:35 +0000. Up 17.74 seconds.
Jan 31 06:09:35 np0005603608.novalocal systemd[1]: Finished Cloud-init: Config Stage.

● cloud-final.service - Cloud-init: Final Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-final.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2026-01-31 06:09:35 UTC; 3h 11min ago
   Main PID: 1109 (code=exited, status=0/SUCCESS)
        CPU: 478ms

Jan 31 06:09:35 np0005603608.novalocal systemd[1]: Starting Cloud-init: Final Stage...
Jan 31 06:09:35 np0005603608.novalocal cloud-init[1279]: Cloud-init v. 24.4-8.el9 running 'modules:final' at Sat, 31 Jan 2026 06:09:35 +0000. Up 18.19 seconds.
Jan 31 06:09:35 np0005603608.novalocal cloud-init[1305]: -----BEGIN SSH HOST KEY FINGERPRINTS-----
Jan 31 06:09:35 np0005603608.novalocal cloud-init[1307]: 256 SHA256:e5N9drk3X5SwzMzL5fKxTip2dbxXI/qhDwmEku98D98 root@np0005603608.novalocal (ECDSA)
Jan 31 06:09:35 np0005603608.novalocal cloud-init[1309]: 256 SHA256:ZeSUSnbZtWludQiTHbmVg26cq0v97MO1P7+4FrqepnQ root@np0005603608.novalocal (ED25519)
Jan 31 06:09:35 np0005603608.novalocal cloud-init[1311]: 3072 SHA256:sqsz7Lhvmu2W18CD6Im4da3VXd1cK4mGH+rEJGTaiDQ root@np0005603608.novalocal (RSA)
Jan 31 06:09:35 np0005603608.novalocal cloud-init[1312]: -----END SSH HOST KEY FINGERPRINTS-----
Jan 31 06:09:35 np0005603608.novalocal cloud-init[1313]: #############################################################
Jan 31 06:09:35 np0005603608.novalocal cloud-init[1279]: Cloud-init v. 24.4-8.el9 finished at Sat, 31 Jan 2026 06:09:35 +0000. Datasource DataSourceConfigDrive [net,ver=2][source=/dev/sr0].  Up 18.39 seconds
Jan 31 06:09:35 np0005603608.novalocal systemd[1]: Finished Cloud-init: Final Stage.

● cloud-init-local.service - Cloud-init: Local Stage (pre-network)
     Loaded: loaded (/usr/lib/systemd/system/cloud-init-local.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2026-01-31 06:09:27 UTC; 3h 11min ago
   Main PID: 812 (code=exited, status=0/SUCCESS)
        CPU: 832ms

Jan 31 06:09:25 localhost systemd[1]: Starting Cloud-init: Local Stage (pre-network)...
Jan 31 06:09:26 localhost cloud-init[837]: Cloud-init v. 24.4-8.el9 running 'init-local' at Sat, 31 Jan 2026 06:09:26 +0000. Up 9.15 seconds.
Jan 31 06:09:27 np0005603608.novalocal systemd[1]: Finished Cloud-init: Local Stage (pre-network).

● cloud-init.service - Cloud-init: Network Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2026-01-31 06:09:34 UTC; 3h 11min ago
   Main PID: 885 (code=exited, status=0/SUCCESS)
        CPU: 1.056s

Jan 31 06:09:34 np0005603608.novalocal cloud-init[922]: |        o++ o+o*.|
Jan 31 06:09:34 np0005603608.novalocal cloud-init[922]: |       o o+ o *o+|
Jan 31 06:09:34 np0005603608.novalocal cloud-init[922]: |        .o   B...|
Jan 31 06:09:34 np0005603608.novalocal cloud-init[922]: |        S   . +  |
Jan 31 06:09:34 np0005603608.novalocal cloud-init[922]: |            .+  .|
Jan 31 06:09:34 np0005603608.novalocal cloud-init[922]: |         . Eo.o o|
Jan 31 06:09:34 np0005603608.novalocal cloud-init[922]: |        . o+..+= |
Jan 31 06:09:34 np0005603608.novalocal cloud-init[922]: |         .+=+oo=O|
Jan 31 06:09:34 np0005603608.novalocal cloud-init[922]: +----[SHA256]-----+
Jan 31 06:09:34 np0005603608.novalocal systemd[1]: Finished Cloud-init: Network Stage.

○ cpupower.service - Configure CPU power related settings
     Loaded: loaded (/usr/lib/systemd/system/cpupower.service; disabled; preset: disabled)
     Active: inactive (dead)

● crond.service - Command Scheduler
     Loaded: loaded (/usr/lib/systemd/system/crond.service; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 06:09:34 UTC; 3h 11min ago
   Main PID: 1009 (crond)
         IO: 168.0K read, 12.0K written
      Tasks: 1 (limit: 48560)
     Memory: 1.2M (peak: 4.8M)
        CPU: 266ms
     CGroup: /system.slice/crond.service
             └─1009 /usr/sbin/crond -n

Jan 31 07:09:01 compute-0 anacron[52258]: Job `cron.daily' started
Jan 31 07:09:01 compute-0 anacron[52258]: Job `cron.daily' terminated
Jan 31 07:29:01 compute-0 anacron[52258]: Job `cron.weekly' started
Jan 31 07:29:01 compute-0 anacron[52258]: Job `cron.weekly' terminated
Jan 31 07:49:01 compute-0 anacron[52258]: Job `cron.monthly' started
Jan 31 07:49:02 compute-0 anacron[52258]: Job `cron.monthly' terminated
Jan 31 07:49:02 compute-0 anacron[52258]: Normal exit (3 jobs run)
Jan 31 08:01:01 compute-0 CROND[307724]: (root) CMD (run-parts /etc/cron.hourly)
Unit display-manager.service could not be found.
Jan 31 08:01:01 compute-0 CROND[307723]: (root) CMDEND (run-parts /etc/cron.hourly)
Jan 31 09:01:01 compute-0 CROND[404001]: (root) CMD (run-parts /etc/cron.hourly)

● dbus-broker.service - D-Bus System Message Bus
     Loaded: loaded (/usr/lib/systemd/system/dbus-broker.service; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
TriggeredBy: ● dbus.socket
       Docs: man:dbus-broker-launch(1)
   Main PID: 809 (dbus-broker-lau)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48560)
     Memory: 2.9M (peak: 3.6M)
        CPU: 10.348s
     CGroup: /system.slice/dbus-broker.service
             ├─809 /usr/bin/dbus-broker-launch --scope system --audit
             └─810 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit

Jan 31 06:59:29 compute-0 dbus-broker-launch[809]: Noticed file-system modification, trigger reload.
Jan 31 07:00:16 compute-0 dbus-broker-launch[810]: avc:  op=load_policy lsm=selinux seqno=7 res=1
Jan 31 07:00:28 compute-0 dbus-broker-launch[810]: avc:  op=load_policy lsm=selinux seqno=8 res=1
Jan 31 07:15:37 compute-0 dbus-broker-launch[810]: avc:  op=load_policy lsm=selinux seqno=9 res=1
Jan 31 07:19:34 compute-0 dbus-broker-launch[810]: avc:  op=load_policy lsm=selinux seqno=10 res=1
Jan 31 07:20:01 compute-0 dbus-broker-launch[810]: avc:  op=load_policy lsm=selinux seqno=11 res=1
Jan 31 07:20:33 compute-0 dbus-broker-launch[809]: Noticed file-system modification, trigger reload.
Jan 31 07:20:33 compute-0 dbus-broker-launch[810]: avc:  op=load_policy lsm=selinux seqno=12 res=1
Jan 31 07:20:33 compute-0 dbus-broker-launch[809]: Noticed file-system modification, trigger reload.
Jan 31 07:22:00 compute-0 dbus-broker-launch[810]: avc:  op=load_policy lsm=selinux seqno=13 res=1

○ dm-event.service - Device-mapper event daemon
     Loaded: loaded (/usr/lib/systemd/system/dm-event.service; static)
     Active: inactive (dead)
TriggeredBy: ● dm-event.socket
       Docs: man:dmeventd(8)

○ dnf-makecache.service - dnf makecache
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.service; static)
     Active: inactive (dead) since Sat 2026-01-31 07:40:15 UTC; 1h 40min ago
TriggeredBy: ● dnf-makecache.timer
    Process: 274340 ExecStart=/usr/bin/dnf makecache --timer (code=exited, status=0/SUCCESS)
   Main PID: 274340 (code=exited, status=0/SUCCESS)
        CPU: 253ms

Jan 31 07:40:15 compute-0 systemd[1]: Starting dnf makecache...
Jan 31 07:40:15 compute-0 dnf[274340]: Metadata cache refreshed recently.
Jan 31 07:40:15 compute-0 systemd[1]: dnf-makecache.service: Deactivated successfully.
Jan 31 07:40:15 compute-0 systemd[1]: Finished dnf makecache.

○ dracut-cmdline.service - dracut cmdline hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-cmdline.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:09:21 UTC; 3h 11min ago
   Duration: 2.476s
       Docs: man:dracut-cmdline.service(8)
             man:dracut.bootup(7)
   Main PID: 321 (code=exited, status=0/SUCCESS)
        CPU: 106ms

Jan 31 06:09:19 localhost systemd[1]: Starting dracut cmdline hook...
Jan 31 06:09:19 localhost dracut-cmdline[321]: dracut-9 dracut-057-102.git20250818.el9
Jan 31 06:09:19 localhost dracut-cmdline[321]: Using kernel command line parameters:    BOOT_IMAGE=(hd0,msdos1)/boot/vmlinuz-5.14.0-665.el9.x86_64 root=UUID=822f14ea-6e7e-41df-b0d8-fbe282d9ded8 ro console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=1G-2G:192M,2G-64G:256M,64G-:512M
Jan 31 06:09:19 localhost systemd[1]: Finished dracut cmdline hook.
Jan 31 06:09:21 localhost systemd[1]: dracut-cmdline.service: Deactivated successfully.
Jan 31 06:09:21 localhost systemd[1]: Stopped dracut cmdline hook.

○ dracut-initqueue.service - dracut initqueue hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-initqueue.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:09:21 UTC; 3h 11min ago
   Duration: 1.537s
       Docs: man:dracut-initqueue.service(8)
             man:dracut.bootup(7)
   Main PID: 494 (code=exited, status=0/SUCCESS)
        CPU: 34ms

Jan 31 06:09:19 localhost systemd[1]: Starting dracut initqueue hook...
Jan 31 06:09:20 localhost systemd[1]: Finished dracut initqueue hook.
Jan 31 06:09:21 localhost systemd[1]: dracut-initqueue.service: Deactivated successfully.
Jan 31 06:09:21 localhost systemd[1]: Stopped dracut initqueue hook.

○ dracut-mount.service - dracut mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-mount.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:09:21 UTC; 3h 11min ago
   Duration: 176ms
       Docs: man:dracut-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 568 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Jan 31 06:09:21 localhost systemd[1]: Starting dracut mount hook...
Jan 31 06:09:21 localhost systemd[1]: Finished dracut mount hook.
Jan 31 06:09:21 localhost systemd[1]: dracut-mount.service: Deactivated successfully.
Jan 31 06:09:21 localhost systemd[1]: Stopped dracut mount hook.

○ dracut-pre-mount.service - dracut pre-mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-mount.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:09:21 UTC; 3h 11min ago
   Duration: 1.498s
       Docs: man:dracut-pre-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 547 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Jan 31 06:09:20 localhost systemd[1]: Starting dracut pre-mount hook...
Jan 31 06:09:20 localhost systemd[1]: Finished dracut pre-mount hook.
Jan 31 06:09:21 localhost systemd[1]: dracut-pre-mount.service: Deactivated successfully.
Jan 31 06:09:21 localhost systemd[1]: Stopped dracut pre-mount hook.

○ dracut-pre-pivot.service - dracut pre-pivot and cleanup hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-pivot.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:09:21 UTC; 3h 11min ago
   Duration: 37ms
       Docs: man:dracut-pre-pivot.service(8)
             man:dracut.bootup(7)
   Main PID: 573 (code=exited, status=0/SUCCESS)
        CPU: 85ms

Jan 31 06:09:21 localhost systemd[1]: Starting dracut pre-pivot and cleanup hook...
Jan 31 06:09:21 localhost systemd[1]: Finished dracut pre-pivot and cleanup hook.
Jan 31 06:09:21 localhost systemd[1]: dracut-pre-pivot.service: Deactivated successfully.
Jan 31 06:09:21 localhost systemd[1]: Stopped dracut pre-pivot and cleanup hook.

○ dracut-pre-trigger.service - dracut pre-trigger hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-trigger.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:09:21 UTC; 3h 11min ago
   Duration: 2.069s
       Docs: man:dracut-pre-trigger.service(8)
             man:dracut.bootup(7)
   Main PID: 462 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Jan 31 06:09:19 localhost systemd[1]: Starting dracut pre-trigger hook...
Jan 31 06:09:19 localhost systemd[1]: Finished dracut pre-trigger hook.
Jan 31 06:09:21 localhost systemd[1]: dracut-pre-trigger.service: Deactivated successfully.
Jan 31 06:09:21 localhost systemd[1]: Stopped dracut pre-trigger hook.

○ dracut-pre-udev.service - dracut pre-udev hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-udev.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:09:21 UTC; 3h 11min ago
   Duration: 2.181s
       Docs: man:dracut-pre-udev.service(8)
             man:dracut.bootup(7)
   Main PID: 410 (code=exited, status=0/SUCCESS)
        CPU: 283ms

Jan 31 06:09:19 localhost systemd[1]: Starting dracut pre-udev hook...
Jan 31 06:09:19 localhost rpc.statd[438]: Version 2.5.4 starting
Jan 31 06:09:19 localhost rpc.statd[438]: Initializing NSM state
Jan 31 06:09:19 localhost rpc.idmapd[443]: Setting log level to 0
Jan 31 06:09:19 localhost systemd[1]: Finished dracut pre-udev hook.
Jan 31 06:09:21 localhost rpc.idmapd[443]: exiting on signal 15
Jan 31 06:09:21 localhost systemd[1]: dracut-pre-udev.service: Deactivated successfully.
Jan 31 06:09:21 localhost systemd[1]: Stopped dracut pre-udev hook.

○ dracut-shutdown-onfailure.service - Service executing upon dracut-shutdown failure to perform cleanup
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown-onfailure.service; static)
     Active: inactive (dead)
       Docs: man:dracut-shutdown.service(8)

● dracut-shutdown.service - Restore /run/initramfs on shutdown
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown.service; static)
     Active: active (exited) since Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
       Docs: man:dracut-shutdown.service(8)
   Main PID: 813 (code=exited, status=0/SUCCESS)
        CPU: 2ms

Jan 31 06:09:25 localhost systemd[1]: Starting Restore /run/initramfs on shutdown...
Jan 31 06:09:25 localhost systemd[1]: Finished Restore /run/initramfs on shutdown.

● edpm-container-shutdown.service - EDPM Container Shutdown
     Loaded: loaded (/etc/systemd/system/edpm-container-shutdown.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2026-01-31 07:02:28 UTC; 2h 18min ago
       Docs: https://github.com/openstack-k8s-operators/docs
   Main PID: 61679 (code=exited, status=0/SUCCESS)
        CPU: 1ms

Jan 31 07:02:28 compute-0 systemd[1]: Starting EDPM Container Shutdown...
Jan 31 07:02:28 compute-0 systemd[1]: Finished EDPM Container Shutdown.

○ edpm_libvirt_guests.service - Suspend libvirt Guests in edpm
     Loaded: loaded (/etc/systemd/system/edpm_libvirt_guests.service; enabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:libvirtd(8)
             https://libvirt.org

● edpm_nova_compute.service - nova_compute container
     Loaded: loaded (/etc/systemd/system/edpm_nova_compute.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:25:39 UTC; 1h 55min ago
    Process: 247688 ExecStart=/usr/bin/podman start nova_compute (code=exited, status=0/SUCCESS)
   Main PID: 247704 (conmon)
         IO: 0B read, 89.5K written
      Tasks: 1 (limit: 48560)
     Memory: 692.0K (peak: 16.8M)
        CPU: 3.505s
     CGroup: /system.slice/edpm_nova_compute.service
             └─247704 /usr/bin/conmon --api-version 1 -c 08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f -u 08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f/userdata -p /run/containers/storage/overlay-containers/08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f

Jan 31 09:20:51 compute-0 nova_compute[247704]: 2026-01-31 09:20:51.556 247708 DEBUG oslo_service.periodic_task [None req-7a4c4282-4b52-4900-be7b-2c1fa0adac81 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Jan 31 09:20:54 compute-0 nova_compute[247704]: 2026-01-31 09:20:54.120 247708 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Jan 31 09:20:55 compute-0 nova_compute[247704]: 2026-01-31 09:20:55.440 247708 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Jan 31 09:20:58 compute-0 nova_compute[247704]: 2026-01-31 09:20:58.082 247708 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Jan 31 09:20:59 compute-0 nova_compute[247704]: 2026-01-31 09:20:59.122 247708 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Jan 31 09:21:00 compute-0 nova_compute[247704]: 2026-01-31 09:21:00.443 247708 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Jan 31 09:21:04 compute-0 nova_compute[247704]: 2026-01-31 09:21:04.121 247708 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Jan 31 09:21:05 compute-0 nova_compute[247704]: 2026-01-31 09:21:05.447 247708 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Jan 31 09:21:09 compute-0 nova_compute[247704]: 2026-01-31 09:21:09.123 247708 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Jan 31 09:21:10 compute-0 nova_compute[247704]: 2026-01-31 09:21:10.450 247708 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m

● edpm_ovn_controller.service - ovn_controller container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_controller.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:16:32 UTC; 2h 4min ago
   Main PID: 149457 (conmon)
         IO: 0B read, 124.5K written
      Tasks: 1 (limit: 48560)
     Memory: 692.0K (peak: 18.3M)
        CPU: 360ms
     CGroup: /system.slice/edpm_ovn_controller.service
             └─149457 /usr/bin/conmon --api-version 1 -c 1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238 -u 1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238/userdata -p /run/containers/storage/overlay-containers/1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238

Jan 31 09:17:57 compute-0 ovn_controller[149457]: 2026-01-31T09:17:57Z|00119|pinctrl(ovn_pinctrl0)|INFO|DHCPNAK fa:16:3e:d8:14:74 10.100.0.6
Jan 31 09:18:01 compute-0 ovn_controller[149457]: 2026-01-31T09:18:01Z|00120|pinctrl(ovn_pinctrl0)|WARN|DHCPREQUEST requested IP 10.100.0.4 does not match offer 10.100.0.6
Jan 31 09:18:01 compute-0 ovn_controller[149457]: 2026-01-31T09:18:01Z|00121|pinctrl(ovn_pinctrl0)|INFO|DHCPNAK fa:16:3e:d8:14:74 10.100.0.6
Jan 31 09:18:02 compute-0 ovn_controller[149457]: 2026-01-31T09:18:02Z|00122|pinctrl(ovn_pinctrl0)|INFO|DHCPOFFER fa:16:3e:d8:14:74 10.100.0.6
Jan 31 09:18:02 compute-0 ovn_controller[149457]: 2026-01-31T09:18:02Z|00123|pinctrl(ovn_pinctrl0)|INFO|DHCPACK fa:16:3e:d8:14:74 10.100.0.6
Jan 31 09:18:23 compute-0 ovn_controller[149457]: 2026-01-31T09:18:23Z|00920|memory_trim|INFO|Detected inactivity (last active 30021 ms ago): trimming memory
Jan 31 09:19:05 compute-0 ovn_controller[149457]: 2026-01-31T09:19:05Z|00921|binding|INFO|Releasing lport 9d925d3a-15af-4795-b206-2c45063bc1f7 from this chassis (sb_readonly=0)
Jan 31 09:19:05 compute-0 ovn_controller[149457]: 2026-01-31T09:19:05Z|00922|binding|INFO|Setting lport 9d925d3a-15af-4795-b206-2c45063bc1f7 down in Southbound
Jan 31 09:19:05 compute-0 ovn_controller[149457]: 2026-01-31T09:19:05Z|00923|binding|INFO|Removing iface tap9d925d3a-15 ovn-installed in OVS
Jan 31 09:20:01 compute-0 ovn_controller[149457]: 2026-01-31T09:20:01Z|00924|memory_trim|INFO|Detected inactivity (last active 30027 ms ago): trimming memory

● edpm_ovn_metadata_agent.service - ovn_metadata_agent container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_metadata_agent.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:18:10 UTC; 2h 3min ago
   Main PID: 160021 (conmon)
         IO: 0B read, 138.5K written
      Tasks: 1 (limit: 48560)
     Memory: 712.0K (peak: 20.1M)
        CPU: 921ms
     CGroup: /system.slice/edpm_ovn_metadata_agent.service
             └─160021 /usr/bin/conmon --api-version 1 -c 4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1 -u 4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1/userdata -p /run/containers/storage/overlay-containers/4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1

Jan 31 09:19:11 compute-0 ovn_metadata_agent[160021]: 2026-01-31 09:19:11.252 160028 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Jan 31 09:20:11 compute-0 ovn_metadata_agent[160021]: 2026-01-31 09:20:11.253 160028 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Jan 31 09:20:11 compute-0 ovn_metadata_agent[160021]: 2026-01-31 09:20:11.253 160028 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Jan 31 09:20:11 compute-0 ovn_metadata_agent[160021]: 2026-01-31 09:20:11.253 160028 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Jan 31 09:20:58 compute-0 ovn_metadata_agent[160021]: 2026-01-31 09:20:58.079 160028 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: SbGlobalUpdateEvent(events=('update',), table='SB_Global', conditions=None, old_conditions=None), priority=20 to row=SB_Global(external_ids={}, nb_cfg=119, options={'arp_ns_explicit_output': 'true', 'mac_prefix': '52:b2:f5', 'max_tunid': '16711680', 'northd_internal_version': '24.03.8-20.33.0-76.8', 'svc_monitor_mac': 'a6:2b:58:cd:91:59'}, ipsec=False) old=SB_Global(nb_cfg=118) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43[00m
Jan 31 09:20:58 compute-0 ovn_metadata_agent[160021]: 2026-01-31 09:20:58.080 160028 DEBUG neutron.agent.ovn.metadata.agent [-] Delaying updating chassis table for 10 seconds run /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:274[00m
Jan 31 09:21:08 compute-0 ovn_metadata_agent[160021]: 2026-01-31 09:21:08.083 160028 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Chassis_Private, record=5c307474-e9ec-4d19-9f52-463eb0ff26d1, col_values=(('external_ids', {'neutron:ovn-metadata-sb-cfg': '119'}),), if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89[00m
Jan 31 09:21:11 compute-0 ovn_metadata_agent[160021]: 2026-01-31 09:21:11.253 160028 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Jan 31 09:21:11 compute-0 ovn_metadata_agent[160021]: 2026-01-31 09:21:11.254 160028 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Jan 31 09:21:11 compute-0 ovn_metadata_agent[160021]: 2026-01-31 09:21:11.254 160028 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Unit fcoe.service could not be found.
Unit hv_kvp_daemon.service could not be found.

○ emergency.service - Emergency Shell
     Loaded: loaded (/usr/lib/systemd/system/emergency.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

● getty@tty1.service - Getty on tty1
     Loaded: loaded (/usr/lib/systemd/system/getty@.service; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 06:09:34 UTC; 3h 11min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1010 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 232.0K (peak: 704.0K)
        CPU: 6ms
     CGroup: /system.slice/system-getty.slice/getty@tty1.service
             └─1010 /sbin/agetty -o "-p -- \\u" --noclear - linux

Jan 31 06:09:34 np0005603608.novalocal systemd[1]: Started Getty on tty1.

● gssproxy.service - GSSAPI Proxy Daemon
     Loaded: loaded (/usr/lib/systemd/system/gssproxy.service; disabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 06:09:27 UTC; 3h 11min ago
   Main PID: 875 (gssproxy)
         IO: 0B read, 0B written
      Tasks: 6 (limit: 48560)
     Memory: 1.8M (peak: 3.3M)
        CPU: 33ms
     CGroup: /system.slice/gssproxy.service
             └─875 /usr/sbin/gssproxy -D

Jan 31 06:09:27 np0005603608.novalocal systemd[1]: Starting GSSAPI Proxy Daemon...
Jan 31 06:09:27 np0005603608.novalocal systemd[1]: Started GSSAPI Proxy Daemon.

○ import-state.service - Import network configuration from initramfs
     Loaded: loaded (/usr/lib/systemd/system/import-state.service; enabled; preset: enabled)
     Active: inactive (dead)

○ initrd-cleanup.service - Cleaning Up and Shutting Down Daemons
     Loaded: loaded (/usr/lib/systemd/system/initrd-cleanup.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:09:21 UTC; 3h 11min ago
   Main PID: 615 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Jan 31 06:09:21 localhost systemd[1]: Starting Cleaning Up and Shutting Down Daemons...
Jan 31 06:09:21 localhost systemd[1]: initrd-cleanup.service: Deactivated successfully.
Jan 31 06:09:21 localhost systemd[1]: Finished Cleaning Up and Shutting Down Daemons.

○ initrd-parse-etc.service - Mountpoints Configured in the Real Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-parse-etc.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:09:21 UTC; 3h 11min ago
   Main PID: 567 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Jan 31 06:09:21 localhost systemd[1]: Starting Mountpoints Configured in the Real Root...
Jan 31 06:09:21 localhost systemd[1]: initrd-parse-etc.service: Deactivated successfully.
Jan 31 06:09:21 localhost systemd[1]: Finished Mountpoints Configured in the Real Root.

○ initrd-switch-root.service - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
   Main PID: 619 (code=exited, status=0/SUCCESS)
        CPU: 6ms

Jan 31 06:09:21 localhost systemd[1]: Starting Switch Root...

○ initrd-udevadm-cleanup-db.service - Cleanup udev Database
     Loaded: loaded (/usr/lib/systemd/system/initrd-udevadm-cleanup-db.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:09:21 UTC; 3h 11min ago
   Main PID: 617 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Jan 31 06:09:21 localhost systemd[1]: Starting Cleanup udev Database...
Jan 31 06:09:21 localhost systemd[1]: initrd-udevadm-cleanup-db.service: Deactivated successfully.
Jan 31 06:09:21 localhost systemd[1]: Finished Cleanup udev Database.

○ ip6tables.service - IPv6 firewall with ip6tables
     Loaded: loaded (/usr/lib/systemd/system/ip6tables.service; disabled; preset: disabled)
     Active: inactive (dead)

○ iptables.service - IPv4 firewall with iptables
     Loaded: loaded (/usr/lib/systemd/system/iptables.service; disabled; preset: disabled)
     Active: inactive (dead) since Sat 2026-01-31 07:02:36 UTC; 2h 18min ago
   Duration: 53min 11.094s
   Main PID: 814 (code=exited, status=0/SUCCESS)
        CPU: 93ms

Jan 31 06:09:25 localhost systemd[1]: Starting IPv4 firewall with iptables...
Jan 31 06:09:25 localhost iptables.init[814]: iptables: Applying firewall rules: [  OK  ]
Jan 31 06:09:25 localhost systemd[1]: Finished IPv4 firewall with iptables.
Jan 31 07:02:36 compute-0 systemd[1]: Stopping IPv4 firewall with iptables...
Jan 31 07:02:36 compute-0 iptables.init[62931]: iptables: Setting chains to policy ACCEPT: raw mangle filter nat [  OK  ]
Jan 31 07:02:36 compute-0 iptables.init[62931]: iptables: Flushing firewall rules: [  OK  ]
Jan 31 07:02:36 compute-0 systemd[1]: iptables.service: Deactivated successfully.
Jan 31 07:02:36 compute-0 systemd[1]: Stopped IPv4 firewall with iptables.

● irqbalance.service - irqbalance daemon
     Loaded: loaded (/usr/lib/systemd/system/irqbalance.service; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
       Docs: man:irqbalance(1)
             https://github.com/Irqbalance/irqbalance
   Main PID: 815 (irqbalance)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48560)
     Memory: 1.1M (peak: 1.4M)
        CPU: 773ms
     CGroup: /system.slice/irqbalance.service
             └─815 /usr/sbin/irqbalance

Jan 31 06:09:35 np0005603608.novalocal irqbalance[815]: Cannot change IRQ 32 affinity: Operation not permitted
Jan 31 06:09:35 np0005603608.novalocal irqbalance[815]: IRQ 32 affinity is now unmanaged
Jan 31 06:09:35 np0005603608.novalocal irqbalance[815]: Cannot change IRQ 30 affinity: Operation not permitted
Jan 31 06:09:35 np0005603608.novalocal irqbalance[815]: IRQ 30 affinity is now unmanaged
Jan 31 06:09:35 np0005603608.novalocal irqbalance[815]: Cannot change IRQ 29 affinity: Operation not permitted
Jan 31 06:09:35 np0005603608.novalocal irqbalance[815]: IRQ 29 affinity is now unmanaged
Jan 31 06:28:55 np0005603608.novalocal irqbalance[815]: Cannot change IRQ 27 affinity: Operation not permitted
Jan 31 06:28:55 np0005603608.novalocal irqbalance[815]: IRQ 27 affinity is now unmanaged
Jan 31 06:57:25 compute-0 irqbalance[815]: Cannot change IRQ 26 affinity: Operation not permitted
Jan 31 06:57:25 compute-0 irqbalance[815]: IRQ 26 affinity is now unmanaged

○ iscsi-init.service - One time configuration for iscsi.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-init.service; disabled; preset: disabled)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 07:23:59 UTC; 1h 57min ago

Jan 31 07:23:19 compute-0 systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).
Jan 31 07:23:59 compute-0 systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).

○ iscsi-onboot.service - Special handling of early boot iSCSI sessions
     Loaded: loaded (/usr/lib/systemd/system/iscsi-onboot.service; enabled; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsi-shutdown.service - Logout off all iSCSI sessions on shutdown
     Loaded: loaded (/usr/lib/systemd/system/iscsi-shutdown.service; static)
     Active: active (exited) since Sat 2026-01-31 07:23:19 UTC; 1h 57min ago
       Docs: man:iscsid(8)
             man:iscsiadm(8)
   Main PID: 224718 (code=exited, status=0/SUCCESS)
        CPU: 2ms

Jan 31 07:23:19 compute-0 systemd[1]: Starting Logout off all iSCSI sessions on shutdown...
Jan 31 07:23:19 compute-0 systemd[1]: Finished Logout off all iSCSI sessions on shutdown.

○ iscsi-starter.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-starter.service; enabled; preset: disabled)
     Active: inactive (dead)

○ iscsi.service - Login and scanning of iSCSI devices
Unit lvm2-activation-early.service could not be found.
     Loaded: loaded (/usr/lib/systemd/system/iscsi.service; indirect; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsid.service - Open-iSCSI
     Loaded: loaded (/usr/lib/systemd/system/iscsid.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:23:59 UTC; 1h 57min ago
TriggeredBy: ● iscsid.socket
       Docs: man:iscsid(8)
             man:iscsiuio(8)
             man:iscsiadm(8)
   Main PID: 231340 (iscsid)
     Status: "Ready to process requests"
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 1.9M (peak: 2.0M)
        CPU: 6ms
     CGroup: /system.slice/iscsid.service
             └─231340 /usr/sbin/iscsid -f

Jan 31 07:23:59 compute-0 systemd[1]: Starting Open-iSCSI...
Jan 31 07:23:59 compute-0 systemd[1]: Started Open-iSCSI.

○ iscsiuio.service - iSCSI UserSpace I/O driver
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ iscsiuio.socket
       Docs: man:iscsiuio(8)

● kdump.service - Crash recovery kernel arming
     Loaded: loaded (/usr/lib/systemd/system/kdump.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2026-01-31 06:09:49 UTC; 3h 11min ago
   Main PID: 1008 (code=exited, status=0/SUCCESS)
        CPU: 16.180s

Jan 31 06:09:47 np0005603608.novalocal dracut[1289]: Linked:         0 files
Jan 31 06:09:47 np0005603608.novalocal dracut[1289]: Compared:       0 xattrs
Jan 31 06:09:47 np0005603608.novalocal dracut[1289]: Compared:       0 files
Jan 31 06:09:47 np0005603608.novalocal dracut[1289]: Saved:          0 B
Jan 31 06:09:47 np0005603608.novalocal dracut[1289]: Duration:       0.000291 seconds
Jan 31 06:09:47 np0005603608.novalocal dracut[1289]: *** Hardlinking files done ***
Jan 31 06:09:48 np0005603608.novalocal dracut[1289]: *** Creating initramfs image file '/boot/initramfs-5.14.0-665.el9.x86_64kdump.img' done ***
Jan 31 06:09:49 np0005603608.novalocal kdumpctl[1016]: kdump: kexec: loaded kdump kernel
Jan 31 06:09:49 np0005603608.novalocal kdumpctl[1016]: kdump: Starting kdump: [OK]
Jan 31 06:09:49 np0005603608.novalocal systemd[1]: Finished Crash recovery kernel arming.

● kmod-static-nodes.service - Create List of Static Device Nodes
     Loaded: loaded (/usr/lib/systemd/system/kmod-static-nodes.service; static)
     Active: active (exited) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
   Main PID: 669 (code=exited, status=0/SUCCESS)
        CPU: 7ms

● ldconfig.service - Rebuild Dynamic Linker Cache
     Loaded: loaded (/usr/lib/systemd/system/ldconfig.service; static)
     Active: active (exited) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
       Docs: man:ldconfig(8)
   Main PID: 692 (code=exited, status=0/SUCCESS)
        CPU: 58ms

Jan 31 06:09:23 localhost systemd[1]: Starting Rebuild Dynamic Linker Cache...
Jan 31 06:09:24 localhost systemd[1]: Finished Rebuild Dynamic Linker Cache.

○ libvirtd.service
     Loaded: masked (Reason: Unit libvirtd.service is masked.)
     Active: inactive (dead)
TriggeredBy: ○ libvirtd.socket
             ○ libvirtd-admin.socket
             ○ libvirtd-ro.socket

○ loadmodules.service - Load legacy module configuration
     Loaded: loaded (/usr/lib/systemd/system/loadmodules.service; enabled; preset: enabled)
     Active: inactive (dead)

○ logrotate.service - Rotate log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.service; static)
     Active: inactive (dead)
TriggeredBy: ● logrotate.timer
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

○ lvm2-lvmpolld.service - LVM2 poll daemon
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.service; static)
     Active: inactive (dead)
TriggeredBy: ● lvm2-lvmpolld.socket
       Docs: man:lvmpolld(8)

● lvm2-monitor.service - Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling
     Loaded: loaded (/usr/lib/systemd/system/lvm2-monitor.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2026-01-31 06:57:50 UTC; 2h 23min ago
       Docs: man:dmeventd(8)
             man:lvcreate(8)
             man:lvchange(8)
             man:vgchange(8)
   Main PID: 34177 (code=exited, status=0/SUCCESS)
        CPU: 26ms

Jan 31 06:57:50 compute-0 systemd[1]: Starting Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling...
Jan 31 06:57:50 compute-0 systemd[1]: Finished Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling.

○ mdmonitor.service - Software RAID monitoring and management
     Loaded: loaded (/usr/lib/systemd/system/mdmonitor.service; enabled; preset: enabled)
     Active: inactive (dead)

○ microcode.service - Load CPU microcode update
     Loaded: loaded (/usr/lib/systemd/system/microcode.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:09:25 UTC; 3h 11min ago

Jan 31 06:09:25 localhost systemd[1]: Load CPU microcode update was skipped because of an unmet condition check (ConditionPathExists=/sys/devices/system/cpu/microcode/reload).

○ modprobe@configfs.service - Load Kernel Module configfs
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
       Docs: man:modprobe(8)
   Main PID: 737 (code=exited, status=0/SUCCESS)
        CPU: 4ms

Jan 31 06:09:24 localhost systemd[1]: Starting Load Kernel Module configfs...
Jan 31 06:09:24 localhost systemd[1]: modprobe@configfs.service: Deactivated successfully.
Jan 31 06:09:24 localhost systemd[1]: Finished Load Kernel Module configfs.

○ modprobe@drm.service - Load Kernel Module drm
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:modprobe(8)
   Main PID: 671 (code=exited, status=0/SUCCESS)
        CPU: 90ms

Jan 31 06:09:23 localhost systemd[1]: modprobe@drm.service: Deactivated successfully.
Jan 31 06:09:23 localhost systemd[1]: Finished Load Kernel Module drm.

○ modprobe@efi_pstore.service - Load Kernel Module efi_pstore
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:modprobe(8)
   Main PID: 672 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Jan 31 06:09:23 localhost systemd[1]: modprobe@efi_pstore.service: Deactivated successfully.
Jan 31 06:09:23 localhost systemd[1]: Finished Load Kernel Module efi_pstore.

○ modprobe@fuse.service - Load Kernel Module fuse
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:modprobe(8)
   Main PID: 673 (code=exited, status=0/SUCCESS)
        CPU: 46ms

Jan 31 06:09:23 localhost systemd[1]: modprobe@fuse.service: Deactivated successfully.
Jan 31 06:09:23 localhost systemd[1]: Finished Load Kernel Module fuse.

● multipathd.service - Device-Mapper Multipath Device Controller
     Loaded: loaded (/usr/lib/systemd/system/multipathd.service; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 07:24:00 UTC; 1h 57min ago
TriggeredBy: ● multipathd.socket
   Main PID: 231500 (multipathd)
     Status: "up"
         IO: 0B read, 0B written
      Tasks: 7
     Memory: 18.4M (peak: 18.9M)
        CPU: 865ms
     CGroup: /system.slice/multipathd.service
             └─231500 /sbin/multipathd -d -s

Jan 31 07:24:00 compute-0 systemd[1]: Starting Device-Mapper Multipath Device Controller...
Jan 31 07:24:00 compute-0 multipathd[231500]: --------start up--------
Jan 31 07:24:00 compute-0 multipathd[231500]: read /etc/multipath.conf
Jan 31 07:24:00 compute-0 multipathd[231500]: path checkers start up
Jan 31 07:24:00 compute-0 systemd[1]: Started Device-Mapper Multipath Device Controller.

○ netns-placeholder.service - Create netns directory
     Loaded: loaded (/etc/systemd/system/netns-placeholder.service; enabled; preset: enabled)
     Active: inactive (dead) since Sat 2026-01-31 07:17:24 UTC; 2h 3min ago
   Main PID: 157011 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Jan 31 07:17:24 compute-0 systemd[1]: Starting Create netns directory...
Jan 31 07:17:24 compute-0 systemd[1]: netns-placeholder.service: Deactivated successfully.
Jan 31 07:17:24 compute-0 systemd[1]: Finished Create netns directory.

○ network.service - LSB: Bring up/down networking
     Loaded: loaded (/etc/rc.d/init.d/network; generated)
     Active: inactive (dead)
       Docs: man:systemd-sysv-generator(8)

● NetworkManager-wait-online.service - Network Manager Wait Online
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager-wait-online.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2026-01-31 07:00:37 UTC; 2h 20min ago
       Docs: man:NetworkManager-wait-online.service(8)
   Main PID: 49122 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Jan 31 07:00:37 compute-0 systemd[1]: Starting Network Manager Wait Online...
Jan 31 07:00:37 compute-0 systemd[1]: Finished Network Manager Wait Online.

● NetworkManager.service - Network Manager
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager.service; enabled; preset: enabled)
    Drop-In: /usr/lib/systemd/system/NetworkManager.service.d
             └─NetworkManager-ovs.conf
     Active: active (running) since Sat 2026-01-31 07:00:37 UTC; 2h 20min ago
       Docs: man:NetworkManager(8)
   Main PID: 49108 (NetworkManager)
         IO: 104.0K read, 255.5K written
      Tasks: 3 (limit: 48560)
     Memory: 6.0M (peak: 6.6M)
        CPU: 1min 30.207s
     CGroup: /system.slice/NetworkManager.service
             └─49108 /usr/sbin/NetworkManager --no-daemon

Jan 31 09:16:39 compute-0 NetworkManager[49108]: <info>  [1769850999.4253] manager: (tap5c9ca540-50): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/406)
Jan 31 09:17:07 compute-0 NetworkManager[49108]: <info>  [1769851027.7349] device (tap9df79f25-f3): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')
Jan 31 09:17:38 compute-0 NetworkManager[49108]: <info>  [1769851058.1481] manager: (tap9d925d3a-15): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/407)
Jan 31 09:17:42 compute-0 NetworkManager[49108]: <info>  [1769851062.9952] manager: (tap9d925d3a-15): new Tun device (/org/freedesktop/NetworkManager/Devices/408)
Jan 31 09:17:43 compute-0 NetworkManager[49108]: <info>  [1769851063.0575] device (tap9d925d3a-15): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Jan 31 09:17:43 compute-0 NetworkManager[49108]: <info>  [1769851063.0587] device (tap9d925d3a-15): state change: unavailable -> disconnected (reason 'none', managed-type: 'external')
Jan 31 09:17:43 compute-0 NetworkManager[49108]: <info>  [1769851063.0971] manager: (tap5c9ca540-50): new Veth device (/org/freedesktop/NetworkManager/Devices/409)
Jan 31 09:17:43 compute-0 NetworkManager[49108]: <info>  [1769851063.1388] device (tap5c9ca540-50): carrier: link connected
Jan 31 09:17:43 compute-0 NetworkManager[49108]: <info>  [1769851063.2438] manager: (tap5c9ca540-50): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/410)
Jan 31 09:19:05 compute-0 NetworkManager[49108]: <info>  [1769851145.1352] device (tap9d925d3a-15): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')

○ nfs-idmapd.service - NFSv4 ID-name mapping service
     Loaded: loaded (/usr/lib/systemd/system/nfs-idmapd.service; static)
     Active: inactive (dead)
       Docs: man:idmapd(8)

○ nfs-mountd.service - NFS Mount Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfs-mountd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.mountd(8)

○ nfs-server.service - NFS server and services
     Loaded: loaded (/usr/lib/systemd/system/nfs-server.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:rpc.nfsd(8)
             man:exportfs(8)

Unit ntpd.service could not be found.
Unit ntpdate.service could not be found.
○ nfs-utils.service - NFS server and client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-utils.service; static)
     Active: inactive (dead)

○ nfsdcld.service - NFSv4 Client Tracking Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfsdcld.service; static)
     Active: inactive (dead)
       Docs: man:nfsdcld(8)

● nftables.service - Netfilter Tables
     Loaded: loaded (/usr/lib/systemd/system/nftables.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2026-01-31 07:02:39 UTC; 2h 18min ago
       Docs: man:nft(8)
   Main PID: 63321 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Jan 31 07:02:39 compute-0 systemd[1]: Starting Netfilter Tables...
Jan 31 07:02:39 compute-0 systemd[1]: Finished Netfilter Tables.

● nis-domainname.service - Read and set NIS domainname from /etc/sysconfig/network
     Loaded: loaded (/usr/lib/systemd/system/nis-domainname.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
   Main PID: 674 (code=exited, status=0/SUCCESS)
        CPU: 4ms

Jan 31 06:09:23 localhost systemd[1]: Finished Read and set NIS domainname from /etc/sysconfig/network.

○ nvmefc-boot-connections.service - Auto-connect to subsystems on FC-NVME devices found during boot
     Loaded: loaded (/usr/lib/systemd/system/nvmefc-boot-connections.service; enabled; preset: enabled)
     Active: inactive (dead)

● openvswitch.service - Open vSwitch
     Loaded: loaded (/usr/lib/systemd/system/openvswitch.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2026-01-31 07:00:23 UTC; 2h 20min ago
   Main PID: 47413 (code=exited, status=0/SUCCESS)
        CPU: 2ms

Jan 31 07:00:23 compute-0 systemd[1]: Starting Open vSwitch...
Jan 31 07:00:23 compute-0 systemd[1]: Finished Open vSwitch.

● ovs-delete-transient-ports.service - Open vSwitch Delete Transient Ports
     Loaded: loaded (/usr/lib/systemd/system/ovs-delete-transient-ports.service; static)
     Active: active (exited) since Sat 2026-01-31 07:00:23 UTC; 2h 20min ago
   Main PID: 47351 (code=exited, status=0/SUCCESS)
        CPU: 21ms

Jan 31 07:00:23 compute-0 systemd[1]: Starting Open vSwitch Delete Transient Ports...
Jan 31 07:00:23 compute-0 systemd[1]: Finished Open vSwitch Delete Transient Ports.

● ovs-vswitchd.service - Open vSwitch Forwarding Unit
     Loaded: loaded (/usr/lib/systemd/system/ovs-vswitchd.service; static)
     Active: active (running) since Sat 2026-01-31 07:00:23 UTC; 2h 20min ago
   Main PID: 47404 (ovs-vswitchd)
         IO: 3.4M read, 1.4M written
      Tasks: 13 (limit: 48560)
     Memory: 247.4M (peak: 251.5M)
        CPU: 42.233s
     CGroup: /system.slice/ovs-vswitchd.service
             └─47404 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach

Jan 31 07:00:23 compute-0 systemd[1]: Starting Open vSwitch Forwarding Unit...
Jan 31 07:00:23 compute-0 ovs-ctl[47394]: Inserting openvswitch module [  OK  ]
Jan 31 07:00:23 compute-0 ovs-ctl[47363]: Starting ovs-vswitchd [  OK  ]
Jan 31 07:00:23 compute-0 ovs-ctl[47363]: Enabling remote OVSDB managers [  OK  ]
Jan 31 07:00:23 compute-0 ovs-vsctl[47412]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0
Jan 31 07:00:23 compute-0 systemd[1]: Started Open vSwitch Forwarding Unit.

● ovsdb-server.service - Open vSwitch Database Unit
     Loaded: loaded (/usr/lib/systemd/system/ovsdb-server.service; static)
     Active: active (running) since Sat 2026-01-31 07:00:23 UTC; 2h 20min ago
   Main PID: 47323 (ovsdb-server)
         IO: 2.5M read, 2.4M written
      Tasks: 1 (limit: 48560)
     Memory: 6.9M (peak: 41.6M)
        CPU: 35.171s
     CGroup: /system.slice/ovsdb-server.service
             └─47323 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
Unit pacemaker.service could not be found.
Unit plymouth-quit-wait.service could not be found.
Unit plymouth-start.service could not be found.
Unit power-profiles-daemon.service could not be found.

Jan 31 07:00:23 compute-0 chown[47270]: /usr/bin/chown: cannot access '/run/openvswitch': No such file or directory
Jan 31 07:00:23 compute-0 ovs-ctl[47275]: /etc/openvswitch/conf.db does not exist ... (warning).
Jan 31 07:00:23 compute-0 ovs-ctl[47275]: Creating empty database /etc/openvswitch/conf.db [  OK  ]
Jan 31 07:00:23 compute-0 ovs-ctl[47275]: Starting ovsdb-server [  OK  ]
Jan 31 07:00:23 compute-0 ovs-vsctl[47324]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait -- init -- set Open_vSwitch . db-version=8.5.1
Jan 31 07:00:23 compute-0 ovs-vsctl[47340]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait set Open_vSwitch . ovs-version=3.3.5-115.el9s "external-ids:system-id=\"5c307474-e9ec-4d19-9f52-463eb0ff26d1\"" "external-ids:rundir=\"/var/run/openvswitch\"" "system-type=\"centos\"" "system-version=\"9\""
Jan 31 07:00:23 compute-0 ovs-ctl[47275]: Configuring Open vSwitch system IDs [  OK  ]
Jan 31 07:00:23 compute-0 ovs-vsctl[47350]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0
Jan 31 07:00:23 compute-0 ovs-ctl[47275]: Enabling remote OVSDB managers [  OK  ]
Jan 31 07:00:23 compute-0 systemd[1]: Started Open vSwitch Database Unit.

● polkit.service - Authorization Manager
     Loaded: loaded (/usr/lib/systemd/system/polkit.service; static)
     Active: active (running) since Sat 2026-01-31 06:59:34 UTC; 2h 21min ago
       Docs: man:polkit(8)
   Main PID: 43587 (polkitd)
         IO: 19.0M read, 0B written
      Tasks: 12 (limit: 48560)
     Memory: 24.9M (peak: 26.6M)
        CPU: 3.917s
     CGroup: /system.slice/polkit.service
             └─43587 /usr/lib/polkit-1/polkitd --no-debug

Jan 31 07:20:37 compute-0 polkitd[43587]: Collecting garbage unconditionally...
Jan 31 07:20:37 compute-0 polkitd[43587]: Loading rules from directory /etc/polkit-1/rules.d
Jan 31 07:20:37 compute-0 polkitd[43587]: Loading rules from directory /usr/share/polkit-1/rules.d
Jan 31 07:20:37 compute-0 polkitd[43587]: Finished loading, compiling and executing 3 rules
Jan 31 07:22:18 compute-0 polkitd[43587]: Registered Authentication Agent for unix-process:215740:438150 (system bus name :1.2915 [pkttyagent --process 215740 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Jan 31 07:22:18 compute-0 polkitd[43587]: Unregistered Authentication Agent for unix-process:215740:438150 (system bus name :1.2915, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Jan 31 07:22:18 compute-0 polkitd[43587]: Registered Authentication Agent for unix-process:215739:438149 (system bus name :1.2916 [pkttyagent --process 215739 --notify-fd 5 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Jan 31 07:22:18 compute-0 polkitd[43587]: Unregistered Authentication Agent for unix-process:215739:438149 (system bus name :1.2916, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Jan 31 07:22:21 compute-0 polkitd[43587]: Registered Authentication Agent for unix-process:216208:438463 (system bus name :1.2919 [pkttyagent --process 216208 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Jan 31 07:22:21 compute-0 polkitd[43587]: Unregistered Authentication Agent for unix-process:216208:438463 (system bus name :1.2919, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)

○ raid-check.service - RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.service; static)
     Active: inactive (dead)
TriggeredBy: ○ raid-check.timer

○ rbdmap.service - Map RBD devices
Unit rpc-svcgssd.service could not be found.
     Loaded: loaded (/usr/lib/systemd/system/rbdmap.service; disabled; preset: disabled)
     Active: inactive (dead)

○ rc-local.service - /etc/rc.d/rc.local Compatibility
     Loaded: loaded (/usr/lib/systemd/system/rc-local.service; static)
     Active: inactive (dead)
       Docs: man:systemd-rc-local-generator(8)

○ rescue.service - Rescue Shell
     Loaded: loaded (/usr/lib/systemd/system/rescue.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

○ rpc-gssd.service - RPC security service for NFS client and server
     Loaded: loaded (/usr/lib/systemd/system/rpc-gssd.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:09:27 UTC; 3h 11min ago
       Docs: man:rpc.gssd(8)

Jan 31 06:09:27 np0005603608.novalocal systemd[1]: RPC security service for NFS client and server was skipped because of an unmet condition check (ConditionPathExists=/etc/krb5.keytab).

● rpc-statd-notify.service - Notify NFS peers of a restart
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd-notify.service; static)
     Active: active (exited) since Sat 2026-01-31 06:09:34 UTC; 3h 11min ago
       Docs: man:sm-notify(8)
             man:rpc.statd(8)
        CPU: 9ms

Jan 31 06:09:34 np0005603608.novalocal systemd[1]: Starting Notify NFS peers of a restart...
Jan 31 06:09:34 np0005603608.novalocal sm-notify[1004]: Version 2.5.4 starting
Jan 31 06:09:34 np0005603608.novalocal systemd[1]: Started Notify NFS peers of a restart.

○ rpc-statd.service - NFS status monitor for NFSv2/3 locking.
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.statd(8)

● rpcbind.service - RPC Bind
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.service; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
TriggeredBy: ● rpcbind.socket
       Docs: man:rpcbind(8)
   Main PID: 697 (rpcbind)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 3.0M (peak: 3.2M)
        CPU: 46ms
     CGroup: /system.slice/rpcbind.service
             └─697 /usr/bin/rpcbind -w -f

Jan 31 06:09:23 localhost systemd[1]: Starting RPC Bind...
Jan 31 06:09:23 localhost systemd[1]: Started RPC Bind.

● rsyslog.service - System Logging Service
     Loaded: loaded (/usr/lib/systemd/system/rsyslog.service; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 06:09:34 UTC; 3h 11min ago
       Docs: man:rsyslogd(8)
             https://www.rsyslog.com/doc/
   Main PID: 1005 (rsyslogd)
         IO: 4.0K read, 45.5M written
      Tasks: 3 (limit: 48560)
     Memory: 44.0M (peak: 44.5M)
        CPU: 27.894s
     CGroup: /system.slice/rsyslog.service
             └─1005 /usr/sbin/rsyslogd -n

Jan 31 08:30:52 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 31 08:43:59 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 31 08:43:59 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 31 08:57:10 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 31 08:57:10 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 31 09:12:50 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 31 09:12:50 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 31 09:15:30 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 31 09:15:30 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 31 09:20:47 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Unit sntp.service could not be found.
Unit sshd-keygen.service could not be found.

○ selinux-autorelabel-mark.service - Mark the need to relabel after reboot
     Loaded: loaded (/usr/lib/systemd/system/selinux-autorelabel-mark.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:09:23 UTC; 3h 11min ago

Jan 31 06:09:23 localhost systemd[1]: Mark the need to relabel after reboot was skipped because of an unmet condition check (ConditionSecurity=!selinux).

● serial-getty@ttyS0.service - Serial Getty on ttyS0
     Loaded: loaded (/usr/lib/systemd/system/serial-getty@.service; enabled-runtime; preset: disabled)
     Active: active (running) since Sat 2026-01-31 06:09:34 UTC; 3h 11min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1011 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 284.0K (peak: 528.0K)
        CPU: 8ms
     CGroup: /system.slice/system-serial\x2dgetty.slice/serial-getty@ttyS0.service
             └─1011 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

Jan 31 06:09:34 np0005603608.novalocal systemd[1]: Started Serial Getty on ttyS0.

○ sshd-keygen@ecdsa.service - OpenSSH ecdsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 07:20:41 UTC; 2h 0min ago

Jan 31 06:09:25 localhost systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Jan 31 07:20:41 compute-0 systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@ed25519.service - OpenSSH ed25519 Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 07:20:41 UTC; 2h 0min ago

Jan 31 06:09:25 localhost systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Jan 31 07:20:41 compute-0 systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@rsa.service - OpenSSH rsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 07:20:41 UTC; 2h 0min ago

Jan 31 06:09:25 localhost systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Jan 31 07:20:41 compute-0 systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

● sshd.service - OpenSSH server daemon
     Loaded: loaded (/usr/lib/systemd/system/sshd.service; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 07:20:41 UTC; 2h 0min ago
       Docs: man:sshd(8)
             man:sshd_config(5)
   Main PID: 187503 (sshd)
Unit syslog.service could not be found.
         IO: 8.0K read, 524.0K written
      Tasks: 1 (limit: 48560)
     Memory: 3.5M (peak: 7.1M)
        CPU: 13.643s
     CGroup: /system.slice/sshd.service
             └─187503 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"

Jan 31 09:14:50 compute-0 sshd-session[421331]: Accepted publickey for zuul from 192.168.122.10 port 49084 ssh2: ECDSA SHA256:/XjW4njRnFkaMo3aYOSKPaOEQq6UYC1L631cF4V0Rd4
Jan 31 09:14:50 compute-0 sshd-session[421331]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Jan 31 09:16:31 compute-0 sshd-session[430825]: Accepted publickey for zuul from 192.168.122.10 port 40202 ssh2: ECDSA SHA256:/XjW4njRnFkaMo3aYOSKPaOEQq6UYC1L631cF4V0Rd4
Jan 31 09:16:31 compute-0 sshd-session[430825]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Jan 31 09:16:31 compute-0 sshd-session[430825]: pam_unix(sshd:session): session closed for user zuul
Jan 31 09:16:31 compute-0 sshd-session[430930]: Accepted publickey for zuul from 192.168.122.10 port 40214 ssh2: ECDSA SHA256:/XjW4njRnFkaMo3aYOSKPaOEQq6UYC1L631cF4V0Rd4
Jan 31 09:16:31 compute-0 sshd-session[430930]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Jan 31 09:16:31 compute-0 sshd-session[430930]: pam_unix(sshd:session): session closed for user zuul
Jan 31 09:20:34 compute-0 sshd-session[435980]: Accepted publickey for zuul from 192.168.122.10 port 60456 ssh2: ECDSA SHA256:/XjW4njRnFkaMo3aYOSKPaOEQq6UYC1L631cF4V0Rd4
Jan 31 09:20:34 compute-0 sshd-session[435980]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)

○ sssd-kcm.service - SSSD Kerberos Cache Manager
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.service; indirect; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ● sssd-kcm.socket
       Docs: man:sssd-kcm(5)

○ sssd.service - System Security Services Daemon
     Loaded: loaded (/usr/lib/systemd/system/sssd.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:09:25 UTC; 3h 11min ago

Jan 31 06:09:25 localhost systemd[1]: System Security Services Daemon was skipped because no trigger condition checks were met.

○ sysstat-collect.service - system activity accounting tool
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-collect.timer
       Docs: man:sa1(8)

○ sysstat-summary.service - Generate a daily summary of process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-summary.timer
       Docs: man:sa2(8)

○ sysstat.service - Resets System Activity Logs
     Loaded: loaded (/usr/lib/systemd/system/sysstat.service; enabled; preset: enabled)
     Active: inactive (dead)

○ systemd-ask-password-console.service - Dispatch Password Requests to Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-console.path
       Docs: man:systemd-ask-password-console.service(8)

○ systemd-ask-password-wall.service - Forward Password Requests to Wall
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-wall.path
       Docs: man:systemd-ask-password-wall.service(8)

○ systemd-binfmt.service - Set Up Additional Binary Formats
     Loaded: loaded (/usr/lib/systemd/system/systemd-binfmt.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd-binfmt.service(8)
             man:binfmt.d(5)
             https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Jan 31 06:09:23 localhost systemd[1]: Set Up Additional Binary Formats was skipped because no trigger condition checks were met.

○ systemd-boot-random-seed.service - Update Boot Loader Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-random-seed.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd-boot-random-seed.service(8)
             man:random(4)

Jan 31 06:09:23 localhost systemd[1]: Update Boot Loader Random Seed was skipped because no trigger condition checks were met.

● systemd-boot-update.service - Automatic Boot Loader Update
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-update.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:bootctl(1)
   Main PID: 693 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Jan 31 06:09:23 localhost systemd[1]: Starting Automatic Boot Loader Update...
Jan 31 06:09:23 localhost bootctl[693]: Couldn't find EFI system partition, skipping.
Jan 31 06:09:23 localhost systemd[1]: Finished Automatic Boot Loader Update.

○ systemd-firstboot.service - First Boot Wizard
     Loaded: loaded (/usr/lib/systemd/system/systemd-firstboot.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd-firstboot(1)

Jan 31 06:09:23 localhost systemd[1]: First Boot Wizard was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ systemd-fsck-root.service - File System Check on Root Device
     Loaded: loaded (/usr/lib/systemd/system/systemd-fsck-root.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
   Duration: 2.941s
       Docs: man:systemd-fsck-root.service(8)
   Main PID: 551 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Jan 31 06:09:20 localhost systemd[1]: Starting File System Check on /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8...
Jan 31 06:09:20 localhost systemd-fsck[553]: /usr/sbin/fsck.xfs: XFS file system.
Jan 31 06:09:20 localhost systemd[1]: Finished File System Check on /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8.

● systemd-hostnamed.service - Hostname Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-hostnamed.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-hostnamed.service.d
             └─disable-privatedevices.conf
     Active: active (running) since Sat 2026-01-31 09:20:52 UTC; 22s ago
       Docs: man:systemd-hostnamed.service(8)
             man:hostname(5)
             man:machine-info(5)
             man:org.freedesktop.resolve1(5)
   Main PID: 438269 (systemd-hostnam)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 2.7M (peak: 3.8M)
        CPU: 96ms
     CGroup: /system.slice/systemd-hostnamed.service
             └─438269 /usr/lib/systemd/systemd-hostnamed

Jan 31 09:20:51 compute-0 systemd[1]: Starting Hostname Service...
Jan 31 09:20:52 compute-0 systemd[1]: Started Hostname Service.

● systemd-hwdb-update.service - Rebuild Hardware Database
     Loaded: loaded (/usr/lib/systemd/system/systemd-hwdb-update.service; static)
     Active: active (exited) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
       Docs: man:hwdb(7)
             man:systemd-hwdb(8)
   Main PID: 685 (code=exited, status=0/SUCCESS)
        CPU: 499ms

Jan 31 06:09:23 localhost systemd[1]: Starting Rebuild Hardware Database...
Jan 31 06:09:24 localhost systemd[1]: Finished Rebuild Hardware Database.

○ systemd-initctl.service - initctl Compatibility Daemon
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-initctl.socket
       Docs: man:systemd-initctl.service(8)

● systemd-journal-catalog-update.service - Rebuild Journal Catalog
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-catalog-update.service; static)
     Active: active (exited) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 698 (code=exited, status=0/SUCCESS)
        CPU: 16ms

Jan 31 06:09:23 localhost systemd[1]: Starting Rebuild Journal Catalog...
Jan 31 06:09:23 localhost systemd[1]: Finished Rebuild Journal Catalog.

● systemd-journal-flush.service - Flush Journal to Persistent Storage
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-flush.service; static)
     Active: active (exited) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 686 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Jan 31 06:09:23 localhost systemd[1]: Starting Flush Journal to Persistent Storage...
Jan 31 06:09:23 localhost systemd[1]: Finished Flush Journal to Persistent Storage.

● systemd-journald.service - Journal Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.service; static)
     Active: active (running) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
TriggeredBy: ● systemd-journald.socket
             ● systemd-journald-dev-log.socket
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 675 (systemd-journal)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 150.4M (peak: 158.0M)
        CPU: 35.473s
     CGroup: /system.slice/systemd-journald.service
             └─675 /usr/lib/systemd/systemd-journald

Jan 31 06:09:23 localhost systemd-journald[675]: Journal started
Jan 31 06:09:23 localhost systemd-journald[675]: Runtime Journal (/run/log/journal/bf0bc0bb03de29b24cba1cc9599cf5d0) is 8.0M, max 153.6M, 145.6M free.
Jan 31 06:09:23 localhost systemd[1]: systemd-journald.service: Deactivated successfully.
Jan 31 06:09:23 localhost systemd-journald[675]: Runtime Journal (/run/log/journal/bf0bc0bb03de29b24cba1cc9599cf5d0) is 8.0M, max 153.6M, 145.6M free.
Jan 31 06:09:23 localhost systemd-journald[675]: Received client request to flush runtime journal.

● systemd-logind.service - User Login Management
     Loaded: loaded (/usr/lib/systemd/system/systemd-logind.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-logind.service.d
             └─10-grub2-logind-service.conf
     Active: active (running) since Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
       Docs: man:sd-login(3)
             man:systemd-logind.service(8)
             man:logind.conf(5)
             man:org.freedesktop.login1(5)
   Main PID: 816 (systemd-logind)
     Status: "Processing requests..."
         IO: 4.0K read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 5.8M (peak: 9.3M)
        CPU: 5.655s
     CGroup: /system.slice/systemd-logind.service
             └─816 /usr/lib/systemd/systemd-logind

Jan 31 09:14:50 compute-0 systemd-logind[816]: New session 64 of user zuul.
Jan 31 09:16:30 compute-0 systemd-logind[816]: Session 64 logged out. Waiting for processes to exit.
Jan 31 09:16:30 compute-0 systemd-logind[816]: Removed session 64.
Jan 31 09:16:31 compute-0 systemd-logind[816]: New session 65 of user zuul.
Jan 31 09:16:31 compute-0 systemd-logind[816]: Session 65 logged out. Waiting for processes to exit.
Jan 31 09:16:31 compute-0 systemd-logind[816]: Removed session 65.
Jan 31 09:16:31 compute-0 systemd-logind[816]: New session 66 of user zuul.
Jan 31 09:16:31 compute-0 systemd-logind[816]: Session 66 logged out. Waiting for processes to exit.
Jan 31 09:16:31 compute-0 systemd-logind[816]: Removed session 66.
Jan 31 09:20:34 compute-0 systemd-logind[816]: New session 67 of user zuul.

○ systemd-machine-id-commit.service - Commit a transient machine-id on disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-machine-id-commit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd-machine-id-commit.service(8)

Jan 31 06:09:23 localhost systemd[1]: Commit a transient machine-id on disk was skipped because of an unmet condition check (ConditionPathIsMountPoint=/etc/machine-id).

Unit systemd-networkd-wait-online.service could not be found.
● systemd-machined.service - Virtual Machine and Container Registration Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-machined.service; static)
     Active: active (running) since Sat 2026-01-31 07:22:11 UTC; 1h 59min ago
       Docs: man:systemd-machined.service(8)
             man:org.freedesktop.machine1(5)
   Main PID: 214448 (systemd-machine)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 1.5M (peak: 2.0M)
        CPU: 4.799s
     CGroup: /system.slice/systemd-machined.service
             └─214448 /usr/lib/systemd/systemd-machined

Jan 31 09:00:24 compute-0 systemd-machined[214448]: New machine qemu-92-instance-000000d2.
Jan 31 09:00:32 compute-0 systemd-machined[214448]: Machine qemu-92-instance-000000d2 terminated.
Jan 31 09:12:49 compute-0 systemd-machined[214448]: New machine qemu-93-instance-000000db.
Jan 31 09:12:55 compute-0 systemd-machined[214448]: Machine qemu-93-instance-000000db terminated.
Jan 31 09:13:44 compute-0 systemd-machined[214448]: New machine qemu-94-instance-000000dc.
Jan 31 09:15:43 compute-0 systemd-machined[214448]: Machine qemu-94-instance-000000dc terminated.
Jan 31 09:16:39 compute-0 systemd-machined[214448]: New machine qemu-95-instance-000000de.
Jan 31 09:17:07 compute-0 systemd-machined[214448]: Machine qemu-95-instance-000000de terminated.
Jan 31 09:17:43 compute-0 systemd-machined[214448]: New machine qemu-96-instance-000000df.
Jan 31 09:19:05 compute-0 systemd-machined[214448]: Machine qemu-96-instance-000000df terminated.

● systemd-modules-load.service - Load Kernel Modules
     Loaded: loaded (/usr/lib/systemd/system/systemd-modules-load.service; static)
     Active: active (exited) since Sat 2026-01-31 07:23:51 UTC; 1h 57min ago
       Docs: man:systemd-modules-load.service(8)
             man:modules-load.d(5)
   Main PID: 229711 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Jan 31 07:23:51 compute-0 systemd[1]: Starting Load Kernel Modules...
Jan 31 07:23:51 compute-0 systemd[1]: Finished Load Kernel Modules.

● systemd-network-generator.service - Generate network units from Kernel command line
     Loaded: loaded (/usr/lib/systemd/system/systemd-network-generator.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd-network-generator.service(8)
   Main PID: 676 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Jan 31 06:09:23 localhost systemd[1]: Finished Generate network units from Kernel command line.

○ systemd-pcrmachine.service - TPM2 PCR Machine ID Measurement
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrmachine.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd-pcrmachine.service(8)

○ systemd-pcrphase-initrd.service - TPM2 PCR Barrier (initrd)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-initrd.service; static)
     Active: inactive (dead)
       Docs: man:systemd-pcrphase-initrd.service(8)

○ systemd-pcrphase-sysinit.service - TPM2 PCR Barrier (Initialization)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-sysinit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
       Docs: man:systemd-pcrphase-sysinit.service(8)

Jan 31 06:09:25 localhost systemd[1]: TPM2 PCR Barrier (Initialization) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pcrphase.service - TPM2 PCR Barrier (User)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:09:27 UTC; 3h 11min ago
       Docs: man:systemd-pcrphase.service(8)

Jan 31 06:09:27 np0005603608.novalocal systemd[1]: TPM2 PCR Barrier (User) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).
Unit systemd-timesyncd.service could not be found.

○ systemd-pstore.service - Platform Persistent Storage Archival
     Loaded: loaded (/usr/lib/systemd/system/systemd-pstore.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd-pstore(8)

Jan 31 06:09:23 localhost systemd[1]: Platform Persistent Storage Archival was skipped because of an unmet condition check (ConditionDirectoryNotEmpty=/sys/fs/pstore).

● systemd-random-seed.service - Load/Save OS Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-random-seed.service; static)
     Active: active (exited) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd-random-seed.service(8)
             man:random(4)
   Main PID: 687 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Jan 31 06:09:23 localhost systemd[1]: Starting Load/Save OS Random Seed...
Jan 31 06:09:23 localhost systemd[1]: Finished Load/Save OS Random Seed.

● systemd-remount-fs.service - Remount Root and Kernel File Systems
     Loaded: loaded (/usr/lib/systemd/system/systemd-remount-fs.service; enabled-runtime; preset: disabled)
     Active: active (exited) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd-remount-fs.service(8)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
   Main PID: 677 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Jan 31 06:09:23 localhost systemd[1]: Finished Remount Root and Kernel File Systems.

○ systemd-repart.service - Repartition Root Disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-repart.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd-repart.service(8)

○ systemd-rfkill.service - Load/Save RF Kill Switch Status
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-rfkill.socket
       Docs: man:systemd-rfkill.service(8)

● systemd-sysctl.service - Apply Kernel Variables
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysctl.service; static)
     Active: active (exited) since Sat 2026-01-31 06:59:47 UTC; 2h 21min ago
       Docs: man:systemd-sysctl.service(8)
             man:sysctl.d(5)
   Main PID: 45073 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Jan 31 06:59:47 compute-0 systemd[1]: Starting Apply Kernel Variables...
Jan 31 06:59:47 compute-0 systemd[1]: Finished Apply Kernel Variables.

○ systemd-sysext.service - Merge System Extension Images into /usr/ and /opt/
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysext.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd-sysext.service(8)

● systemd-sysusers.service - Create System Users
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysusers.service; static)
     Active: active (exited) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:sysusers.d(5)
             man:systemd-sysusers.service(8)
   Main PID: 688 (code=exited, status=0/SUCCESS)
        CPU: 18ms

Jan 31 06:09:23 localhost systemd[1]: Starting Create System Users...
Jan 31 06:09:23 localhost systemd[1]: Finished Create System Users.

○ systemd-tmpfiles-clean.service - Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:24:48 UTC; 2h 56min ago
TriggeredBy: ● systemd-tmpfiles-clean.timer
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 7481 (code=exited, status=0/SUCCESS)
        CPU: 34ms

Jan 31 06:24:48 np0005603608.novalocal systemd[1]: Starting Cleanup of Temporary Directories...
Jan 31 06:24:48 np0005603608.novalocal systemd[1]: systemd-tmpfiles-clean.service: Deactivated successfully.
Jan 31 06:24:48 np0005603608.novalocal systemd[1]: Finished Cleanup of Temporary Directories.

Unit systemd-tmpfiles.service could not be found.
● systemd-tmpfiles-setup-dev.service - Create Static Device Nodes in /dev
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup-dev.service; static)
     Active: active (exited) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 690 (code=exited, status=0/SUCCESS)
        CPU: 32ms

Jan 31 06:09:23 localhost systemd[1]: Starting Create Static Device Nodes in /dev...
Jan 31 06:09:23 localhost systemd[1]: Finished Create Static Device Nodes in /dev.

● systemd-tmpfiles-setup.service - Create Volatile Files and Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup.service; static)
     Active: active (exited) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 694 (code=exited, status=0/SUCCESS)
        CPU: 67ms

Jan 31 06:09:23 localhost systemd[1]: Starting Create Volatile Files and Directories...
Jan 31 06:09:23 localhost systemd[1]: Finished Create Volatile Files and Directories.

● systemd-udev-settle.service - Wait for udev To Complete Device Initialization
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-settle.service; static)
     Active: active (exited) since Sat 2026-01-31 07:23:46 UTC; 1h 57min ago
       Docs: man:systemd-udev-settle.service(8)
   Main PID: 228798 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Jan 31 07:23:46 compute-0 systemd[1]: Starting Wait for udev To Complete Device Initialization...
Jan 31 07:23:46 compute-0 udevadm[228798]: systemd-udev-settle.service is deprecated. Please fix multipathd.service not to pull it in.
Jan 31 07:23:46 compute-0 systemd[1]: Finished Wait for udev To Complete Device Initialization.

● systemd-udev-trigger.service - Coldplug All udev Devices
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-trigger.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-udev-trigger.service.d
             └─systemd-udev-trigger-no-reload.conf
     Active: active (exited) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:udev(7)
             man:systemd-udevd.service(8)
   Main PID: 680 (code=exited, status=0/SUCCESS)
        CPU: 74ms

Jan 31 06:09:23 localhost systemd[1]: Finished Coldplug All udev Devices.

● systemd-udevd.service - Rule-based Manager for Device Events and Files
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd.service; static)
     Active: active (running) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
TriggeredBy: ● systemd-udevd-kernel.socket
             ● systemd-udevd-control.socket
       Docs: man:systemd-udevd.service(8)
             man:udev(7)
   Main PID: 727 (systemd-udevd)
     Status: "Processing with 32 children at max"
         IO: 58.9M read, 23.8M written
      Tasks: 1
     Memory: 37.7M (peak: 88.8M)
        CPU: 20.709s
     CGroup: /system.slice/systemd-udevd.service
             └─udev
               └─727 /usr/lib/systemd/systemd-udevd

Jan 31 09:12:49 compute-0 systemd-udevd[418926]: Network interface NamePolicy= disabled on kernel command line.
Jan 31 09:13:44 compute-0 systemd-udevd[420012]: Network interface NamePolicy= disabled on kernel command line.
Jan 31 09:13:44 compute-0 systemd-udevd[420017]: Network interface NamePolicy= disabled on kernel command line.
Jan 31 09:14:58 compute-0 lvm[422028]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Jan 31 09:14:58 compute-0 lvm[422028]: VG ceph_vg0 finished
Jan 31 09:16:39 compute-0 systemd-udevd[431120]: Network interface NamePolicy= disabled on kernel command line.
Jan 31 09:17:43 compute-0 systemd-udevd[433111]: Network interface NamePolicy= disabled on kernel command line.
Jan 31 09:17:43 compute-0 systemd-udevd[433113]: Network interface NamePolicy= disabled on kernel command line.
Jan 31 09:20:41 compute-0 lvm[436688]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Jan 31 09:20:41 compute-0 lvm[436688]: VG ceph_vg0 finished

● systemd-update-done.service - Update is Completed
Unit tlp.service could not be found.
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-done.service; static)
     Active: active (exited) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
       Docs: man:systemd-update-done.service(8)
   Main PID: 807 (code=exited, status=0/SUCCESS)
        CPU: 15ms

Jan 31 06:09:24 localhost systemd[1]: Starting Update is Completed...
Jan 31 06:09:24 localhost systemd[1]: Finished Update is Completed.

○ systemd-update-utmp-runlevel.service - Record Runlevel Change in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp-runlevel.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:09:34 UTC; 3h 11min ago
       Docs: man:systemd-update-utmp-runlevel.service(8)
             man:utmp(5)
   Main PID: 1035 (code=exited, status=0/SUCCESS)
        CPU: 6ms

Jan 31 06:09:34 np0005603608.novalocal systemd[1]: Starting Record Runlevel Change in UTMP...
Jan 31 06:09:34 np0005603608.novalocal systemd[1]: systemd-update-utmp-runlevel.service: Deactivated successfully.
Jan 31 06:09:34 np0005603608.novalocal systemd[1]: Finished Record Runlevel Change in UTMP.

● systemd-update-utmp.service - Record System Boot/Shutdown in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp.service; static)
     Active: active (exited) since Sat 2026-01-31 06:09:24 UTC; 3h 11min ago
       Docs: man:systemd-update-utmp.service(8)
             man:utmp(5)
   Main PID: 726 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Jan 31 06:09:24 localhost systemd[1]: Starting Record System Boot/Shutdown in UTMP...
Jan 31 06:09:24 localhost systemd[1]: Finished Record System Boot/Shutdown in UTMP.

● systemd-user-sessions.service - Permit User Sessions
     Loaded: loaded (/usr/lib/systemd/system/systemd-user-sessions.service; static)
     Active: active (exited) since Sat 2026-01-31 06:09:34 UTC; 3h 11min ago
       Docs: man:systemd-user-sessions.service(8)
   Main PID: 1007 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Jan 31 06:09:34 np0005603608.novalocal systemd[1]: Starting Permit User Sessions...
Jan 31 06:09:34 np0005603608.novalocal systemd[1]: Finished Permit User Sessions.

○ systemd-vconsole-setup.service - Setup Virtual Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-vconsole-setup.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:09:21 UTC; 3h 11min ago
   Duration: 2.578s
       Docs: man:systemd-vconsole-setup.service(8)
             man:vconsole.conf(5)
   Main PID: 308 (code=exited, status=0/SUCCESS)
        CPU: 232ms

Jan 31 06:09:19 localhost systemd[1]: Finished Setup Virtual Console.
Jan 31 06:09:21 localhost systemd[1]: systemd-vconsole-setup.service: Deactivated successfully.
Jan 31 06:09:21 localhost systemd[1]: Stopped Setup Virtual Console.
Notice: journal has been rotated since unit was started, output may be incomplete.

● tuned.service - Dynamic System Tuning Daemon
     Loaded: loaded (/usr/lib/systemd/system/tuned.service; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 07:11:33 UTC; 2h 9min ago
       Docs: man:tuned(8)
             man:tuned.conf(5)
             man:tuned-adm(8)
   Main PID: 108365 (tuned)
         IO: 28.0K read, 0B written
      Tasks: 4 (limit: 48560)
     Memory: 14.0M (peak: 15.9M)
        CPU: 2.593s
     CGroup: /system.slice/tuned.service
             └─108365 /usr/bin/python3 -Es /usr/sbin/tuned -l -P

Jan 31 07:11:32 compute-0 systemd[1]: Starting Dynamic System Tuning Daemon...
Jan 31 07:11:33 compute-0 systemd[1]: Started Dynamic System Tuning Daemon.

○ unbound-anchor.service - update of the root trust anchor for DNSSEC validation in unbound
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.service; static)
     Active: inactive (dead)
TriggeredBy: ● unbound-anchor.timer
       Docs: man:unbound-anchor(8)

● user-runtime-dir@1000.service - User Runtime Directory /run/user/1000
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Sat 2026-01-31 06:09:53 UTC; 3h 11min ago
       Docs: man:user@.service(5)
   Main PID: 4307 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Jan 31 06:09:53 np0005603608.novalocal systemd[1]: Starting User Runtime Directory /run/user/1000...
Jan 31 06:09:53 np0005603608.novalocal systemd[1]: Finished User Runtime Directory /run/user/1000.

● user-runtime-dir@42477.service - User Runtime Directory /run/user/42477
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Sat 2026-01-31 07:05:25 UTC; 2h 15min ago
       Docs: man:user@.service(5)
   Main PID: 76129 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Jan 31 07:05:25 compute-0 systemd[1]: Starting User Runtime Directory /run/user/42477...
Jan 31 07:05:25 compute-0 systemd[1]: Finished User Runtime Directory /run/user/42477.

● user@1000.service - User Manager for UID 1000
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Sat 2026-01-31 06:09:53 UTC; 3h 11min ago
       Docs: man:user@.service(5)
   Main PID: 4308 (systemd)
     Status: "Ready."
         IO: 676.0K read, 8.0K written
      Tasks: 5
     Memory: 9.6M (peak: 15.9M)
        CPU: 8.749s
     CGroup: /user.slice/user-1000.slice/user@1000.service
             ├─app.slice
             │ └─dbus-broker.service
             │   ├─12265 /usr/bin/dbus-broker-launch --scope user
             │   └─12293 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             ├─init.scope
             │ ├─4308 /usr/lib/systemd/systemd --user
             │ └─4310 "(sd-pam)"
             └─user.slice
               └─podman-pause-03c69239.scope
                 └─12088 catatonit -P

Jan 31 06:28:53 np0005603608.novalocal dbus-broker-launch[12265]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +31: Eavesdropping is deprecated and ignored
Jan 31 06:28:53 np0005603608.novalocal dbus-broker-launch[12265]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +33: Eavesdropping is deprecated and ignored
Jan 31 06:28:53 np0005603608.novalocal systemd[4308]: Started D-Bus User Message Bus.
Jan 31 06:28:53 np0005603608.novalocal dbus-broker-lau[12265]: Ready
Jan 31 06:28:53 np0005603608.novalocal systemd[4308]: selinux: avc:  op=load_policy lsm=selinux seqno=4 res=1
Jan 31 06:28:53 np0005603608.novalocal systemd[4308]: Created slice Slice /user.
Jan 31 06:28:53 np0005603608.novalocal systemd[4308]: podman-12073.scope: unit configures an IP firewall, but not running as root.
Jan 31 06:28:53 np0005603608.novalocal systemd[4308]: (This warning is only shown for the first unit using IP firewalling.)
Jan 31 06:28:53 np0005603608.novalocal systemd[4308]: Started podman-12073.scope.
Jan 31 06:28:54 np0005603608.novalocal systemd[4308]: Started podman-pause-03c69239.scope.

● user@42477.service - User Manager for UID 42477
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Sat 2026-01-31 07:05:25 UTC; 2h 15min ago
       Docs: man:user@.service(5)
   Main PID: 76130 (systemd)
     Status: "Ready."
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 7.5M (peak: 11.2M)
        CPU: 6.782s
     CGroup: /user.slice/user-42477.slice/user@42477.service
             └─init.scope
               ├─76130 /usr/lib/systemd/systemd --user
               └─76132 "(sd-pam)"

Jan 31 07:05:25 compute-0 systemd[76130]: Reached target Sockets.
Jan 31 07:05:25 compute-0 systemd[76130]: Reached target Basic System.
Jan 31 07:05:25 compute-0 systemd[76130]: Reached target Main User Target.
Jan 31 07:05:25 compute-0 systemd[76130]: Startup finished in 111ms.
Jan 31 07:05:25 compute-0 systemd[1]: Started User Manager for UID 42477.
Jan 31 07:07:26 compute-0 systemd[76130]: Starting Mark boot as successful...
Jan 31 07:07:26 compute-0 systemd[76130]: Finished Mark boot as successful.
Jan 31 07:10:48 compute-0 systemd[76130]: Created slice User Background Tasks Slice.
Jan 31 07:10:48 compute-0 systemd[76130]: Starting Cleanup of User's Temporary Files and Directories...
Jan 31 07:10:48 compute-0 systemd[76130]: Finished Cleanup of User's Temporary Files and Directories.

○ virtinterfaced.service - libvirt interface daemon
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtinterfaced-admin.socket
             ○ virtinterfaced.socket
             ○ virtinterfaced-ro.socket
       Docs: man:virtinterfaced(8)
             https://libvirt.org/

○ virtlockd.service - libvirt locking daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ● virtlockd.socket
             ○ virtlockd-admin.socket
       Docs: man:virtlockd(8)
             https://libvirt.org/

● virtlogd.service - libvirt logging daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:22:08 UTC; 1h 59min ago
TriggeredBy: ● virtlogd.socket
             ● virtlogd-admin.socket
       Docs: man:virtlogd(8)
             https://libvirt.org/
   Main PID: 213815 (virtlogd)
         IO: 644.0K read, 7.3M written
      Tasks: 1 (limit: 48560)
     Memory: 4.1M (peak: 4.6M)
        CPU: 1min 41.243s
     CGroup: /system.slice/virtlogd.service
             └─213815 /usr/sbin/virtlogd

Jan 31 07:22:08 compute-0 systemd[1]: Starting libvirt logging daemon...
Jan 31 07:22:08 compute-0 systemd[1]: Started libvirt logging daemon.

○ virtnetworkd.service - libvirt network daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnetworkd-admin.socket
             ○ virtnetworkd.socket
             ○ virtnetworkd-ro.socket
       Docs: man:virtnetworkd(8)
             https://libvirt.org/

● virtnodedevd.service - libvirt nodedev daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:25:45 UTC; 1h 55min ago
TriggeredBy: ● virtnodedevd-admin.socket
             ● virtnodedevd.socket
             ● virtnodedevd-ro.socket
       Docs: man:virtnodedevd(8)
             https://libvirt.org/
   Main PID: 248127 (virtnodedevd)
         IO: 4.7M read, 0B written
      Tasks: 20 (limit: 48560)
     Memory: 10.7M (peak: 12.2M)
        CPU: 8.727s
     CGroup: /system.slice/virtnodedevd.service
             └─248127 /usr/sbin/virtnodedevd --timeout 120

Jan 31 07:25:45 compute-0 systemd[1]: Starting libvirt nodedev daemon...
Jan 31 07:25:45 compute-0 systemd[1]: Started libvirt nodedev daemon.

○ virtnwfilterd.service - libvirt nwfilter daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnwfilterd-ro.socket
             ○ virtnwfilterd.socket
             ○ virtnwfilterd-admin.socket
       Docs: man:virtnwfilterd(8)
             https://libvirt.org/

○ virtproxyd.service - libvirt proxy daemon
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.service; enabled; preset: disabled)
     Active: inactive (dead) since Sat 2026-01-31 08:32:05 UTC; 49min ago
   Duration: 2min 1.922s
TriggeredBy: ● virtproxyd-ro.socket
             ● virtproxyd-tls.socket
             ● virtproxyd.socket
             ● virtproxyd-admin.socket
       Docs: man:virtproxyd(8)
             https://libvirt.org/
    Process: 360097 ExecStart=/usr/sbin/virtproxyd $VIRTPROXYD_ARGS (code=exited, status=0/SUCCESS)
   Main PID: 360097 (code=exited, status=0/SUCCESS)
        CPU: 78ms

Jan 31 08:30:03 compute-0 systemd[1]: Starting libvirt proxy daemon...
Jan 31 08:30:03 compute-0 systemd[1]: Started libvirt proxy daemon.
Jan 31 08:32:05 compute-0 systemd[1]: virtproxyd.service: Deactivated successfully.
Unit ypbind.service could not be found.
Unit yppasswdd.service could not be found.
Unit ypserv.service could not be found.
Unit ypxfrd.service could not be found.

● virtqemud.service - libvirt QEMU daemon
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.service; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 07:25:36 UTC; 1h 55min ago
TriggeredBy: ● virtqemud-ro.socket
             ● virtqemud-admin.socket
             ● virtqemud.socket
       Docs: man:virtqemud(8)
             https://libvirt.org/
   Main PID: 247621 (virtqemud)
         IO: 48.1M read, 67.4M written
      Tasks: 19 (limit: 32768)
     Memory: 73.2M (peak: 140.5M)
        CPU: 26.039s
     CGroup: /system.slice/virtqemud.service
             └─247621 /usr/sbin/virtqemud --timeout 120

Jan 31 08:30:13 compute-0 virtqemud[247621]: Domain id=79 name='instance-000000a9' uuid=2b24a8d0-ad95-4460-acf1-0acb658330aa is tainted: custom-monitor
Jan 31 08:32:07 compute-0 virtqemud[247621]: argument unsupported: QEMU guest agent is not configured
Jan 31 08:54:17 compute-0 virtqemud[247621]: End of file while reading data: Input/output error
Jan 31 09:14:57 compute-0 virtqemud[247621]: Failed to connect socket to '/var/run/libvirt/virtnetworkd-sock-ro': No such file or directory
Jan 31 09:14:57 compute-0 virtqemud[247621]: Failed to connect socket to '/var/run/libvirt/virtnwfilterd-sock-ro': No such file or directory
Jan 31 09:14:57 compute-0 virtqemud[247621]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory
Jan 31 09:15:41 compute-0 virtqemud[247621]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory
Jan 31 09:20:40 compute-0 virtqemud[247621]: Failed to connect socket to '/var/run/libvirt/virtnetworkd-sock-ro': No such file or directory
Jan 31 09:20:40 compute-0 virtqemud[247621]: Failed to connect socket to '/var/run/libvirt/virtnwfilterd-sock-ro': No such file or directory
Jan 31 09:20:40 compute-0 virtqemud[247621]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory

● virtsecretd.service - libvirt secret daemon
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:30:41 UTC; 1h 50min ago
TriggeredBy: ● virtsecretd.socket
             ● virtsecretd-ro.socket
             ● virtsecretd-admin.socket
       Docs: man:virtsecretd(8)
             https://libvirt.org/
   Main PID: 253515 (virtsecretd)
         IO: 740.0K read, 0B written
      Tasks: 18 (limit: 48560)
     Memory: 4.8M (peak: 5.8M)
        CPU: 1.297s
     CGroup: /system.slice/virtsecretd.service
             └─253515 /usr/sbin/virtsecretd --timeout 120

Jan 31 07:30:41 compute-0 systemd[1]: Starting libvirt secret daemon...
Jan 31 07:30:41 compute-0 systemd[1]: Started libvirt secret daemon.

○ virtstoraged.service - libvirt storage daemon
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtstoraged-ro.socket
             ○ virtstoraged-admin.socket
             ○ virtstoraged.socket
       Docs: man:virtstoraged(8)
             https://libvirt.org/

● -.slice - Root Slice
     Loaded: loaded
     Active: active since Sat 2026-01-31 06:09:18 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:18 UTC; 3h 11min ago
       Docs: man:systemd.special(7)
      Tasks: 1322
     Memory: 3.8G
        CPU: 1h 40min 43.570s
     CGroup: /
             ├─442196 turbostat --debug sleep 10
             ├─442200 sleep 10
             ├─init.scope
             │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
             ├─machine.slice
             │ ├─libpod-08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f.scope
             │ │ └─container
             │ │   ├─247706 dumb-init --single-child -- kolla_start
             │ │   ├─247708 /usr/bin/python3 /usr/bin/nova-compute
             │ │   ├─254428 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmpf8bmbhs4/privsep.sock
             │ │   ├─255323 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmpm6uxsdmh/privsep.sock
             │ │   └─315733 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmpel5it6fi/privsep.sock
             │ ├─libpod-1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238.scope
             │ │ └─container
             │ │   ├─149459 dumb-init --single-child -- kolla_start
             │ │   └─149462 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
             │ └─libpod-4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1.scope
             │   └─container
             │     ├─160023 dumb-init --single-child -- kolla_start
             │     ├─160028 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │     ├─160292 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │     ├─160297 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmprrz9sb_2/privsep.sock
             │     ├─254935 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmptkbyd576/privsep.sock
             │     └─255113 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmp6f9jilk8/privsep.sock
             ├─system.slice
             │ ├─NetworkManager.service
             │ │ └─49108 /usr/sbin/NetworkManager --no-daemon
             │ ├─auditd.service
             │ │ ├─699 /sbin/auditd
             │ │ └─701 /usr/sbin/sedispatch
             │ ├─chronyd.service
             │ │ └─58682 /usr/sbin/chronyd -F 2
             │ ├─crond.service
             │ │ └─1009 /usr/sbin/crond -n
             │ ├─dbus-broker.service
             │ │ ├─809 /usr/bin/dbus-broker-launch --scope system --audit
             │ │ └─810 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             │ ├─edpm_nova_compute.service
             │ │ └─247704 /usr/bin/conmon --api-version 1 -c 08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f -u 08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f/userdata -p /run/containers/storage/overlay-containers/08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f
             │ ├─edpm_ovn_controller.service
             │ │ └─149457 /usr/bin/conmon --api-version 1 -c 1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238 -u 1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238/userdata -p /run/containers/storage/overlay-containers/1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238
             │ ├─edpm_ovn_metadata_agent.service
             │ │ └─160021 /usr/bin/conmon --api-version 1 -c 4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1 -u 4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1/userdata -p /run/containers/storage/overlay-containers/4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1
             │ ├─gssproxy.service
             │ │ └─875 /usr/sbin/gssproxy -D
             │ ├─irqbalance.service
             │ │ └─815 /usr/sbin/irqbalance
             │ ├─iscsid.service
             │ │ └─231340 /usr/sbin/iscsid -f
             │ ├─multipathd.service
             │ │ └─231500 /sbin/multipathd -d -s
             │ ├─ovs-vswitchd.service
             │ │ └─47404 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             │ ├─ovsdb-server.service
             │ │ └─47323 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             │ ├─polkit.service
             │ │ └─43587 /usr/lib/polkit-1/polkitd --no-debug
             │ ├─rpcbind.service
             │ │ └─697 /usr/bin/rpcbind -w -f
             │ ├─rsyslog.service
             │ │ └─1005 /usr/sbin/rsyslogd -n
             │ ├─sshd.service
             │ │ └─187503 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             │ ├─system-ceph\x2df70fcd2a\x2ddcb4\x2d5f89\x2da4ba\x2d79a09959083b.slice
             │ │ ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@crash.compute-0.service
             │ │ │ ├─libpod-payload-1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976
             │ │ │ │ ├─81648 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ │ └─81650 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ └─runtime
             │ │ │   └─81645 /usr/bin/conmon --api-version 1 -c 1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976 -u 1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976/userdata -p /run/containers/storage/overlay-containers/1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976
             │ │ ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@haproxy.rgw.default.compute-0.cwtxbj.service
             │ │ │ ├─libpod-payload-b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3
             │ │ │ │ ├─95977 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ │ │ │ ├─95985 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ │ │ └─95987 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ │ └─runtime
             │ │ │   └─95975 /usr/bin/conmon --api-version 1 -c b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3 -u b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3/userdata -p /run/containers/storage/overlay-containers/b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-haproxy-rgw-default-compute-0-cwtxbj --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@haproxy.rgw.default.compute-0.cwtxbj.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3
             │ │ ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@keepalived.rgw.default.compute-0.rwjfwq.service
             │ │ │ ├─libpod-payload-37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51
             │ │ │ │ ├─96434 /run/podman-init -- ./init.sh
             │ │ │ │ ├─96436 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ │ │ └─96438 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ │ └─runtime
             │ │ │   └─96432 /usr/bin/conmon --api-version 1 -c 37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51 -u 37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51/userdata -p /run/containers/storage/overlay-containers/37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-keepalived-rgw-default-compute-0-rwjfwq --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@keepalived.rgw.default.compute-0.rwjfwq.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51
             │ │ ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mds.cephfs.compute-0.voybui.service
             │ │ │ ├─libpod-payload-b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd
             │ │ │ │ ├─94764 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.voybui -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─94769 /usr/bin/ceph-mds -n mds.cephfs.compute-0.voybui -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─94760 /usr/bin/conmon --api-version 1 -c b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd -u b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd/userdata -p /run/containers/storage/overlay-containers/b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-mds-cephfs-compute-0-voybui --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mds.cephfs.compute-0.voybui.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd
             │ │ ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mgr.compute-0.hhuoua.service
             │ │ │ ├─libpod-payload-80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26
             │ │ │ │ ├─74789 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.hhuoua -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─74791 /usr/bin/ceph-mgr -n mgr.compute-0.hhuoua -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─74787 /usr/bin/conmon --api-version 1 -c 80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26 -u 80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26/userdata -p /run/containers/storage/overlay-containers/80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-mgr-compute-0-hhuoua --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mgr.compute-0.hhuoua.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26
             │ │ ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mon.compute-0.service
             │ │ │ ├─libpod-payload-c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7
             │ │ │ │ ├─74494 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ │ └─74496 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─74492 /usr/bin/conmon --api-version 1 -c c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7 -u c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7/userdata -p /run/containers/storage/overlay-containers/c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7
             │ │ ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@osd.0.service
             │ │ │ ├─libpod-payload-7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630
             │ │ │ │ ├─84814 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─84816 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─84812 /usr/bin/conmon --api-version 1 -c 7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630 -u 7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630/userdata -p /run/containers/storage/overlay-containers/7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630
             │ │ └─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@rgw.rgw.compute-0.njduba.service
             │ │   ├─libpod-payload-7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595
             │ │   │ ├─94237 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.njduba -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │   │ └─94239 /usr/bin/radosgw -n client.rgw.rgw.compute-0.njduba -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │   └─runtime
             │ │     └─94235 /usr/bin/conmon --api-version 1 -c 7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595 -u 7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595/userdata -p /run/containers/storage/overlay-containers/7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-rgw-rgw-compute-0-njduba --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@rgw.rgw.compute-0.njduba.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595
             │ ├─system-getty.slice
             │ │ └─getty@tty1.service
             │ │   └─1010 /sbin/agetty -o "-p -- \\u" --noclear - linux
             │ ├─system-serial\x2dgetty.slice
             │ │ └─serial-getty@ttyS0.service
             │ │   └─1011 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             │ ├─systemd-hostnamed.service
             │ │ └─438269 /usr/lib/systemd/systemd-hostnamed
             │ ├─systemd-journald.service
             │ │ └─675 /usr/lib/systemd/systemd-journald
             │ ├─systemd-logind.service
             │ │ └─816 /usr/lib/systemd/systemd-logind
             │ ├─systemd-machined.service
             │ │ └─214448 /usr/lib/systemd/systemd-machined
             │ ├─systemd-udevd.service
             │ │ └─udev
             │ │   └─727 /usr/lib/systemd/systemd-udevd
             │ ├─tuned.service
             │ │ └─108365 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             │ ├─virtlogd.service
             │ │ └─213815 /usr/sbin/virtlogd
             │ ├─virtnodedevd.service
             │ │ └─248127 /usr/sbin/virtnodedevd --timeout 120
             │ ├─virtqemud.service
             │ │ └─247621 /usr/sbin/virtqemud --timeout 120
             │ └─virtsecretd.service
             │   └─253515 /usr/sbin/virtsecretd --timeout 120
             └─user.slice
               ├─user-1000.slice
               │ ├─session-1.scope
               │ │ └─4520 /usr/bin/python3
               │ ├─session-67.scope
               │ │ ├─435980 "sshd-session: zuul [priv]"
               │ │ ├─435983 "sshd-session: zuul@notty"
               │ │ ├─435984 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
               │ │ ├─436008 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
               │ │ ├─442195 timeout 15s turbostat --debug sleep 10
               │ │ ├─442903 timeout 300s systemctl status --all
               │ │ ├─442904 systemctl status --all
               │ │ ├─442930 timeout 300s ceph fs ls --format json-pretty
               │ │ ├─442931 /usr/bin/python3 -s /usr/bin/ceph fs ls --format json-pretty
               │ │ ├─442951 timeout 300s semanage module -l
               │ │ └─442952 /usr/bin/python3 -EsI /usr/sbin/semanage module -l
               │ └─user@1000.service
               │   ├─app.slice
               │   │ └─dbus-broker.service
               │   │   ├─12265 /usr/bin/dbus-broker-launch --scope user
               │   │   └─12293 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               │   ├─init.scope
               │   │ ├─4308 /usr/lib/systemd/systemd --user
               │   │ └─4310 "(sd-pam)"
               │   └─user.slice
               │     └─podman-pause-03c69239.scope
               │       └─12088 catatonit -P
               └─user-42477.slice
                 ├─session-21.scope
                 │ ├─76126 "sshd-session: ceph-admin [priv]"
                 │ └─76148 "sshd-session: ceph-admin"
                 ├─session-23.scope
                 │ ├─76143 "sshd-session: ceph-admin [priv]"
                 │ └─76149 "sshd-session: ceph-admin@notty"
                 ├─session-24.scope
                 │ ├─76200 "sshd-session: ceph-admin [priv]"
                 │ └─76203 "sshd-session: ceph-admin@notty"
                 ├─session-25.scope
                 │ ├─76254 "sshd-session: ceph-admin [priv]"
                 │ └─76257 "sshd-session: ceph-admin@notty"
                 ├─session-26.scope
                 │ ├─76308 "sshd-session: ceph-admin [priv]"
                 │ └─76311 "sshd-session: ceph-admin@notty"
                 ├─session-27.scope
                 │ ├─76362 "sshd-session: ceph-admin [priv]"
                 │ └─76365 "sshd-session: ceph-admin@notty"
                 ├─session-28.scope
                 │ ├─76416 "sshd-session: ceph-admin [priv]"
                 │ └─76419 "sshd-session: ceph-admin@notty"
                 ├─session-29.scope
                 │ ├─76470 "sshd-session: ceph-admin [priv]"
                 │ └─76473 "sshd-session: ceph-admin@notty"
                 ├─session-30.scope
                 │ ├─76524 "sshd-session: ceph-admin [priv]"
                 │ └─76527 "sshd-session: ceph-admin@notty"
                 ├─session-31.scope
                 │ ├─76578 "sshd-session: ceph-admin [priv]"
                 │ └─76581 "sshd-session: ceph-admin@notty"
                 ├─session-32.scope
                 │ ├─76605 "sshd-session: ceph-admin [priv]"
                 │ └─76608 "sshd-session: ceph-admin@notty"
                 ├─session-33.scope
                 │ ├─76659 "sshd-session: ceph-admin [priv]"
                 │ └─76662 "sshd-session: ceph-admin@notty"
                 └─user@42477.service
                   └─init.scope
                     ├─76130 /usr/lib/systemd/systemd --user
                     └─76132 "(sd-pam)"

Jan 31 09:21:08 compute-0 systemd[1]: Started libpod-conmon-1e62b43494c4c5430967c461a614951b2b3e7d9dc0da33bf378a592cf365588f.scope.
Jan 31 09:21:08 compute-0 systemd[1]: Started libcrun container.
Jan 31 09:21:08 compute-0 systemd[1]: libpod-1e62b43494c4c5430967c461a614951b2b3e7d9dc0da33bf378a592cf365588f.scope: Deactivated successfully.
Jan 31 09:21:08 compute-0 systemd[1]: var-lib-containers-storage-overlay-fe605de0efbfff7f1e129c9c05273d581a069c6245020dd6d05de659c33a3558-merged.mount: Deactivated successfully.
Jan 31 09:21:09 compute-0 systemd[1]: libpod-conmon-1e62b43494c4c5430967c461a614951b2b3e7d9dc0da33bf378a592cf365588f.scope: Deactivated successfully.
Jan 31 09:21:09 compute-0 systemd[1]: Started libpod-conmon-c91ad40d319ca0959f25542c639fa3531098a9c8d610a5d90837a5e49347f30f.scope.
Jan 31 09:21:09 compute-0 systemd[1]: Started libcrun container.
Jan 31 09:21:10 compute-0 systemd[1]: libpod-c91ad40d319ca0959f25542c639fa3531098a9c8d610a5d90837a5e49347f30f.scope: Deactivated successfully.
Jan 31 09:21:10 compute-0 systemd[1]: var-lib-containers-storage-overlay-2b43700eb7572bebd3472a5ea1bd5ebd36f83d647edcce72c5d2c1452938ca5d-merged.mount: Deactivated successfully.
Jan 31 09:21:11 compute-0 systemd[1]: libpod-conmon-c91ad40d319ca0959f25542c639fa3531098a9c8d610a5d90837a5e49347f30f.scope: Deactivated successfully.

● machine.slice - Virtual Machine and Container Slice
     Loaded: loaded (/usr/lib/systemd/system/machine.slice; static)
     Active: active since Sat 2026-01-31 07:04:28 UTC; 2h 16min ago
      Until: Sat 2026-01-31 07:04:28 UTC; 2h 16min ago
       Docs: man:systemd.special(7)
         IO: 330.4M read, 287.8M written
      Tasks: 46
     Memory: 1.4G (peak: 2.4G)
        CPU: 38min 12.778s
     CGroup: /machine.slice
             ├─libpod-08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f.scope
             │ └─container
             │   ├─247706 dumb-init --single-child -- kolla_start
             │   ├─247708 /usr/bin/python3 /usr/bin/nova-compute
             │   ├─254428 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmpf8bmbhs4/privsep.sock
             │   ├─255323 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmpm6uxsdmh/privsep.sock
             │   └─315733 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmpel5it6fi/privsep.sock
             ├─libpod-1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238.scope
             │ └─container
             │   ├─149459 dumb-init --single-child -- kolla_start
             │   └─149462 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
             └─libpod-4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1.scope
               └─container
                 ├─160023 dumb-init --single-child -- kolla_start
                 ├─160028 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
                 ├─160292 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
                 ├─160297 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmprrz9sb_2/privsep.sock
                 ├─254935 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmptkbyd576/privsep.sock
                 └─255113 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmp6f9jilk8/privsep.sock

Jan 31 09:21:08 compute-0 frosty_cray[442152]: 167 167
Jan 31 09:21:10 compute-0 hungry_colden[442329]: {
Jan 31 09:21:10 compute-0 hungry_colden[442329]:     "d19aa227-e399-4341-9824-b20a6ddbc903": {
Jan 31 09:21:10 compute-0 hungry_colden[442329]:         "ceph_fsid": "f70fcd2a-dcb4-5f89-a4ba-79a09959083b",
Jan 31 09:21:10 compute-0 hungry_colden[442329]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Jan 31 09:21:10 compute-0 hungry_colden[442329]:         "osd_id": 0,
Jan 31 09:21:10 compute-0 hungry_colden[442329]:         "osd_uuid": "d19aa227-e399-4341-9824-b20a6ddbc903",
Jan 31 09:21:10 compute-0 hungry_colden[442329]:         "type": "bluestore"
Jan 31 09:21:10 compute-0 hungry_colden[442329]:     }
Jan 31 09:21:10 compute-0 hungry_colden[442329]: }

● system-ceph\x2df70fcd2a\x2ddcb4\x2d5f89\x2da4ba\x2d79a09959083b.slice - Slice /system/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b
     Loaded: loaded
     Active: active since Sat 2026-01-31 07:04:31 UTC; 2h 16min ago
      Until: Sat 2026-01-31 07:04:31 UTC; 2h 16min ago
         IO: 3.5G read, 12.3G written
      Tasks: 877
     Memory: 2.3G (peak: 2.5G)
        CPU: 11min 586ms
     CGroup: /system.slice/system-ceph\x2df70fcd2a\x2ddcb4\x2d5f89\x2da4ba\x2d79a09959083b.slice
             ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@crash.compute-0.service
             │ ├─libpod-payload-1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976
             │ │ ├─81648 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ └─81650 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ └─runtime
             │   └─81645 /usr/bin/conmon --api-version 1 -c 1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976 -u 1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976/userdata -p /run/containers/storage/overlay-containers/1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976
             ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@haproxy.rgw.default.compute-0.cwtxbj.service
             │ ├─libpod-payload-b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3
             │ │ ├─95977 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ │ ├─95985 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ └─95987 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ └─runtime
             │   └─95975 /usr/bin/conmon --api-version 1 -c b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3 -u b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3/userdata -p /run/containers/storage/overlay-containers/b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-haproxy-rgw-default-compute-0-cwtxbj --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@haproxy.rgw.default.compute-0.cwtxbj.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3
             ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@keepalived.rgw.default.compute-0.rwjfwq.service
             │ ├─libpod-payload-37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51
             │ │ ├─96434 /run/podman-init -- ./init.sh
             │ │ ├─96436 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ └─96438 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ └─runtime
             │   └─96432 /usr/bin/conmon --api-version 1 -c 37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51 -u 37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51/userdata -p /run/containers/storage/overlay-containers/37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-keepalived-rgw-default-compute-0-rwjfwq --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@keepalived.rgw.default.compute-0.rwjfwq.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51
             ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mds.cephfs.compute-0.voybui.service
             │ ├─libpod-payload-b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd
             │ │ ├─94764 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.voybui -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─94769 /usr/bin/ceph-mds -n mds.cephfs.compute-0.voybui -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─94760 /usr/bin/conmon --api-version 1 -c b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd -u b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd/userdata -p /run/containers/storage/overlay-containers/b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-mds-cephfs-compute-0-voybui --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mds.cephfs.compute-0.voybui.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd
             ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mgr.compute-0.hhuoua.service
             │ ├─libpod-payload-80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26
             │ │ ├─74789 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.hhuoua -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─74791 /usr/bin/ceph-mgr -n mgr.compute-0.hhuoua -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─74787 /usr/bin/conmon --api-version 1 -c 80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26 -u 80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26/userdata -p /run/containers/storage/overlay-containers/80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-mgr-compute-0-hhuoua --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mgr.compute-0.hhuoua.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26
             ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mon.compute-0.service
             │ ├─libpod-payload-c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7
             │ │ ├─74494 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─74496 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─runtime
             │   └─74492 /usr/bin/conmon --api-version 1 -c c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7 -u c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7/userdata -p /run/containers/storage/overlay-containers/c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7
             ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@osd.0.service
             │ ├─libpod-payload-7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630
             │ │ ├─84814 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─84816 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─84812 /usr/bin/conmon --api-version 1 -c 7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630 -u 7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630/userdata -p /run/containers/storage/overlay-containers/7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630
             └─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@rgw.rgw.compute-0.njduba.service
               ├─libpod-payload-7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595
               │ ├─94237 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.njduba -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
               │ └─94239 /usr/bin/radosgw -n client.rgw.rgw.compute-0.njduba -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
               └─runtime
                 └─94235 /usr/bin/conmon --api-version 1 -c 7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595 -u 7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595/userdata -p /run/containers/storage/overlay-containers/7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-rgw-rgw-compute-0-njduba --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@rgw.rgw.compute-0.njduba.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595

Jan 31 09:21:13 compute-0 ceph-mon[74496]: from='client.? 192.168.122.100:0/893647686' entity='client.admin' cmd=[{"prefix": "df", "detail": "detail", "format": "json-pretty"}]: dispatch
Jan 31 09:21:13 compute-0 ceph-mon[74496]: from='client.? 192.168.122.101:0/3564389048' entity='client.admin' cmd=[{"prefix": "df", "format": "json-pretty"}]: dispatch
Jan 31 09:21:13 compute-0 ceph-mon[74496]: from='client.? 192.168.122.100:0/1772620264' entity='client.admin' cmd=[{"prefix": "df", "format": "json-pretty"}]: dispatch
Jan 31 09:21:13 compute-0 ceph-mon[74496]: from='client.? 192.168.122.101:0/2538997185' entity='client.admin' cmd=[{"prefix": "fs dump", "format": "json-pretty"}]: dispatch
Jan 31 09:21:14 compute-0 radosgw[94239]: ====== starting new request req=0x7fdbf7a9a6f0 =====
Jan 31 09:21:14 compute-0 radosgw[94239]: ====== req done req=0x7fdbf7a9a6f0 op status=0 http_status=200 latency=0.000000000s ======
Jan 31 09:21:14 compute-0 radosgw[94239]: beast: 0x7fdbf7a9a6f0: 192.168.122.102 - anonymous [31/Jan/2026:09:21:14.015 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.000000000s
Jan 31 09:21:14 compute-0 ceph-mgr[74791]: log_channel(audit) log [DBG] : from='client.48784 -' entity='client.admin' cmd=[{"prefix": "osd df", "output_method": "tree", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Jan 31 09:21:14 compute-0 ceph-mgr[74791]: log_channel(cluster) log [DBG] : pgmap v4500: 305 pgs: 305 active+clean; 120 MiB data, 1.6 GiB used, 19 GiB / 21 GiB avail
Jan 31 09:21:14 compute-0 ceph-mgr[74791]: log_channel(audit) log [DBG] : from='client.48790 -' entity='client.admin' cmd=[{"prefix": "osd df", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch

● system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice - Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged
     Loaded: loaded
     Active: active since Sat 2026-01-31 07:22:10 UTC; 1h 59min ago
      Until: Sat 2026-01-31 07:22:10 UTC; 1h 59min ago
         IO: 16.0K read, 0B written
      Tasks: 0
     Memory: 4.0K (peak: 58.6M)
        CPU: 926ms
     CGroup: /system.slice/system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice

Jan 31 07:22:10 compute-0 systemd[1]: Created slice Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged.

● system-getty.slice - Slice /system/getty
     Loaded: loaded
     Active: active since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 252.0K (peak: 724.0K)
        CPU: 6ms
     CGroup: /system.slice/system-getty.slice
             └─getty@tty1.service
               └─1010 /sbin/agetty -o "-p -- \\u" --noclear - linux

● system-modprobe.slice - Slice /system/modprobe
     Loaded: loaded
     Active: active since Sat 2026-01-31 06:09:19 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:19 UTC; 3h 11min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 48.0K (peak: 12.3M)
        CPU: 156ms
     CGroup: /system.slice/system-modprobe.slice

Jan 31 06:09:19 localhost systemd[1]: Created slice Slice /system/modprobe.

● system-serial\x2dgetty.slice - Slice /system/serial-getty
     Loaded: loaded
     Active: active since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 304.0K (peak: 548.0K)
        CPU: 8ms
     CGroup: /system.slice/system-serial\x2dgetty.slice
             └─serial-getty@ttyS0.service
               └─1011 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

● system-sshd\x2dkeygen.slice - Slice /system/sshd-keygen
     Loaded: loaded
     Active: active since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 0B (peak: 0B)
        CPU: 0
     CGroup: /system.slice/system-sshd\x2dkeygen.slice

● system.slice - System Slice
     Loaded: loaded
     Active: active since Sat 2026-01-31 06:09:18 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:18 UTC; 3h 11min ago
       Docs: man:systemd.special(7)
         IO: 3.6G read, 12.7G written
      Tasks: 1007
     Memory: 3.1G (peak: 3.3G)
        CPU: 21min 32.653s
     CGroup: /system.slice
             ├─NetworkManager.service
             │ └─49108 /usr/sbin/NetworkManager --no-daemon
             ├─auditd.service
             │ ├─699 /sbin/auditd
             │ └─701 /usr/sbin/sedispatch
             ├─chronyd.service
             │ └─58682 /usr/sbin/chronyd -F 2
             ├─crond.service
             │ └─1009 /usr/sbin/crond -n
             ├─dbus-broker.service
             │ ├─809 /usr/bin/dbus-broker-launch --scope system --audit
             │ └─810 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             ├─edpm_nova_compute.service
             │ └─247704 /usr/bin/conmon --api-version 1 -c 08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f -u 08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f/userdata -p /run/containers/storage/overlay-containers/08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 08cc1288983ebbc3e029df4ba78417491faf6dcee06220d37f18f3223d9f406f
             ├─edpm_ovn_controller.service
             │ └─149457 /usr/bin/conmon --api-version 1 -c 1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238 -u 1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238/userdata -p /run/containers/storage/overlay-containers/1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238
             ├─edpm_ovn_metadata_agent.service
             │ └─160021 /usr/bin/conmon --api-version 1 -c 4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1 -u 4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1/userdata -p /run/containers/storage/overlay-containers/4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1
             ├─gssproxy.service
             │ └─875 /usr/sbin/gssproxy -D
             ├─irqbalance.service
             │ └─815 /usr/sbin/irqbalance
             ├─iscsid.service
             │ └─231340 /usr/sbin/iscsid -f
             ├─multipathd.service
             │ └─231500 /sbin/multipathd -d -s
             ├─ovs-vswitchd.service
             │ └─47404 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             ├─ovsdb-server.service
             │ └─47323 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             ├─polkit.service
             │ └─43587 /usr/lib/polkit-1/polkitd --no-debug
             ├─rpcbind.service
             │ └─697 /usr/bin/rpcbind -w -f
             ├─rsyslog.service
             │ └─1005 /usr/sbin/rsyslogd -n
             ├─sshd.service
             │ └─187503 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             ├─system-ceph\x2df70fcd2a\x2ddcb4\x2d5f89\x2da4ba\x2d79a09959083b.slice
             │ ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@crash.compute-0.service
             │ │ ├─libpod-payload-1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976
             │ │ │ ├─81648 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ └─81650 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ └─runtime
             │ │   └─81645 /usr/bin/conmon --api-version 1 -c 1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976 -u 1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976/userdata -p /run/containers/storage/overlay-containers/1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 1da6676927c3fbc16088c8bd8068265d57380b59a05a52c32a3be4739c9d9976
             │ ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@haproxy.rgw.default.compute-0.cwtxbj.service
             │ │ ├─libpod-payload-b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3
             │ │ │ ├─95977 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ │ │ ├─95985 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ │ └─95987 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ └─runtime
             │ │   └─95975 /usr/bin/conmon --api-version 1 -c b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3 -u b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3/userdata -p /run/containers/storage/overlay-containers/b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-haproxy-rgw-default-compute-0-cwtxbj --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@haproxy.rgw.default.compute-0.cwtxbj.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg b160f7ad791bcb51d7340140782e9b69931fca86a37ca28ddd2321bd134bedd3
             │ ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@keepalived.rgw.default.compute-0.rwjfwq.service
             │ │ ├─libpod-payload-37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51
             │ │ │ ├─96434 /run/podman-init -- ./init.sh
             │ │ │ ├─96436 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ │ └─96438 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ └─runtime
             │ │   └─96432 /usr/bin/conmon --api-version 1 -c 37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51 -u 37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51/userdata -p /run/containers/storage/overlay-containers/37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-keepalived-rgw-default-compute-0-rwjfwq --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@keepalived.rgw.default.compute-0.rwjfwq.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 37a60ce08c4c74b1713cee8109229a211cbb75cdd957895056cf506b036b5f51
             │ ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mds.cephfs.compute-0.voybui.service
             │ │ ├─libpod-payload-b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd
             │ │ │ ├─94764 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.voybui -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─94769 /usr/bin/ceph-mds -n mds.cephfs.compute-0.voybui -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─94760 /usr/bin/conmon --api-version 1 -c b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd -u b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd/userdata -p /run/containers/storage/overlay-containers/b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-mds-cephfs-compute-0-voybui --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mds.cephfs.compute-0.voybui.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg b8961a2bb89ef92ece9fe756e2217169f987d51ab5ed9d70855c2e200dbe66dd
             │ ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mgr.compute-0.hhuoua.service
             │ │ ├─libpod-payload-80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26
             │ │ │ ├─74789 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.hhuoua -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─74791 /usr/bin/ceph-mgr -n mgr.compute-0.hhuoua -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─74787 /usr/bin/conmon --api-version 1 -c 80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26 -u 80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26/userdata -p /run/containers/storage/overlay-containers/80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-mgr-compute-0-hhuoua --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mgr.compute-0.hhuoua.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 80eff2094e986ce79ca2f5db33c4e20adbe7c8c35152ee84bfca5e742fce5e26
             │ ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mon.compute-0.service
             │ │ ├─libpod-payload-c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7
             │ │ │ ├─74494 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─74496 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─runtime
             │ │   └─74492 /usr/bin/conmon --api-version 1 -c c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7 -u c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7/userdata -p /run/containers/storage/overlay-containers/c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg c6500841c07b36a8c971bbdcb750ce9cda5744cce00d5b6822bb699bead089c7
             │ ├─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@osd.0.service
             │ │ ├─libpod-payload-7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630
             │ │ │ ├─84814 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─84816 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─84812 /usr/bin/conmon --api-version 1 -c 7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630 -u 7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630/userdata -p /run/containers/storage/overlay-containers/7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7fd99238d214935b3dacb5b0284648e15db53a9954031165abce49a65cde0630
             │ └─ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@rgw.rgw.compute-0.njduba.service
             │   ├─libpod-payload-7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595
             │   │ ├─94237 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.njduba -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │   │ └─94239 /usr/bin/radosgw -n client.rgw.rgw.compute-0.njduba -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │   └─runtime
             │     └─94235 /usr/bin/conmon --api-version 1 -c 7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595 -u 7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595/userdata -p /run/containers/storage/overlay-containers/7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595/userdata/pidfile -n ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b-rgw-rgw-compute-0-njduba --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595/userdata/oci-log --conmon-pidfile /run/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b@rgw.rgw.compute-0.njduba.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7b0621a2e40c1742efa8191945f46ebb87559261384b35471c2d6d71eb9be595
             ├─system-getty.slice
             │ └─getty@tty1.service
             │   └─1010 /sbin/agetty -o "-p -- \\u" --noclear - linux
             ├─system-serial\x2dgetty.slice
             │ └─serial-getty@ttyS0.service
             │   └─1011 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             ├─systemd-hostnamed.service
             │ └─438269 /usr/lib/systemd/systemd-hostnamed
             ├─systemd-journald.service
             │ └─675 /usr/lib/systemd/systemd-journald
             ├─systemd-logind.service
             │ └─816 /usr/lib/systemd/systemd-logind
             ├─systemd-machined.service
             │ └─214448 /usr/lib/systemd/systemd-machined
             ├─systemd-udevd.service
             │ └─udev
             │   └─727 /usr/lib/systemd/systemd-udevd
             ├─tuned.service
             │ └─108365 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             ├─virtlogd.service
             │ └─213815 /usr/sbin/virtlogd
             ├─virtnodedevd.service
             │ └─248127 /usr/sbin/virtnodedevd --timeout 120
             ├─virtqemud.service
             │ └─247621 /usr/sbin/virtqemud --timeout 120
             └─virtsecretd.service
               └─253515 /usr/sbin/virtsecretd --timeout 120

Jan 31 09:21:04 compute-0 nova_compute[247704]: 2026-01-31 09:21:04.121 247708 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Jan 31 09:21:05 compute-0 nova_compute[247704]: 2026-01-31 09:21:05.447 247708 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Jan 31 09:21:07 compute-0 podman[441974]: 2026-01-31 09:21:07.926880046 +0000 UTC m=+0.097249084 container health_status 1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_controller, io.buildah.version=1.41.3, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '93c1edad6c3ce19ccbf4cad1c823140b960799b036165432d2a9b50972fa7d6a-1ebd21696fff3d9ce9d1c627d87eb768e7a7895873c4ad726f2d4c0751d2120c-1ebd21696fff3d9ce9d1c627d87eb768e7a7895873c4ad726f2d4c0751d2120c-1ebd21696fff3d9ce9d1c627d87eb768e7a7895873c4ad726f2d4c0751d2120c'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, container_name=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, managed_by=edpm_ansible, 
org.label-schema.build-date=20260127, org.label-schema.license=GPLv2)
Jan 31 09:21:08 compute-0 ovn_metadata_agent[160021]: 2026-01-31 09:21:08.083 160028 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Chassis_Private, record=5c307474-e9ec-4d19-9f52-463eb0ff26d1, col_values=(('external_ids', {'neutron:ovn-metadata-sb-cfg': '119'}),), if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89[00m
Jan 31 09:21:09 compute-0 nova_compute[247704]: 2026-01-31 09:21:09.123 247708 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Jan 31 09:21:10 compute-0 nova_compute[247704]: 2026-01-31 09:21:10.450 247708 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Jan 31 09:21:11 compute-0 ovn_metadata_agent[160021]: 2026-01-31 09:21:11.253 160028 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Jan 31 09:21:11 compute-0 ovn_metadata_agent[160021]: 2026-01-31 09:21:11.254 160028 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Jan 31 09:21:11 compute-0 ovn_metadata_agent[160021]: 2026-01-31 09:21:11.254 160028 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Jan 31 09:21:14 compute-0 nova_compute[247704]: 2026-01-31 09:21:14.126 247708 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m

● user-1000.slice - User Slice of UID 1000
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Sat 2026-01-31 06:09:53 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:53 UTC; 3h 11min ago
       Docs: man:user@.service(5)
         IO: 581.0M read, 8.4G written
      Tasks: 37 (limit: 20031)
     Memory: 1.7G (peak: 4.5G)
        CPU: 21min 52.396s
     CGroup: /user.slice/user-1000.slice
             ├─session-1.scope
             │ └─4520 /usr/bin/python3
             ├─session-67.scope
             │ ├─435980 "sshd-session: zuul [priv]"
             │ ├─435983 "sshd-session: zuul@notty"
             │ ├─435984 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ ├─436008 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ ├─442195 timeout 15s turbostat --debug sleep 10
             │ ├─442903 timeout 300s systemctl status --all
             │ ├─442904 systemctl status --all
             │ ├─442930 timeout 300s ceph fs ls --format json-pretty
             │ ├─442931 /usr/bin/python3 -s /usr/bin/ceph fs ls --format json-pretty
             │ ├─442951 timeout 300s semanage module -l
             │ └─442952 /usr/bin/python3 -EsI /usr/sbin/semanage module -l
             └─user@1000.service
               ├─app.slice
               │ └─dbus-broker.service
               │   ├─12265 /usr/bin/dbus-broker-launch --scope user
               │   └─12293 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               ├─init.scope
               │ ├─4308 /usr/lib/systemd/systemd --user
               │ └─4310 "(sd-pam)"
               └─user.slice
                 └─podman-pause-03c69239.scope
                   └─12088 catatonit -P

Jan 31 09:16:31 compute-0 sudo[430934]: pam_unix(sudo:session): session closed for user root
Jan 31 09:16:31 compute-0 sshd-session[430933]: Received disconnect from 192.168.122.10 port 40214:11: disconnected by user
Jan 31 09:16:31 compute-0 sshd-session[430933]: Disconnected from user zuul 192.168.122.10 port 40214
Jan 31 09:20:34 compute-0 sudo[435984]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Jan 31 09:20:34 compute-0 sudo[435984]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Jan 31 09:20:39 compute-0 ovs-vsctl[436342]: ovs|00001|db_ctl_base|ERR|no key "dpdk-init" in Open_vSwitch record "." column other_config
Jan 31 09:20:49 compute-0 crontab[437918]: (root) LIST (root)
Jan 31 09:21:05 compute-0 ovs-appctl[441233]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Jan 31 09:21:05 compute-0 ovs-appctl[441238]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Jan 31 09:21:06 compute-0 ovs-appctl[441243]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

● user-42477.slice - User Slice of UID 42477
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Sat 2026-01-31 07:05:25 UTC; 2h 15min ago
      Until: Sat 2026-01-31 07:05:25 UTC; 2h 15min ago
       Docs: man:user@.service(5)
         IO: 7.9M read, 663.7M written
      Tasks: 26 (limit: 20031)
     Memory: 240.1M (peak: 468.9M)
        CPU: 9min 46.109s
     CGroup: /user.slice/user-42477.slice
             ├─session-21.scope
             │ ├─76126 "sshd-session: ceph-admin [priv]"
             │ └─76148 "sshd-session: ceph-admin"
             ├─session-23.scope
             │ ├─76143 "sshd-session: ceph-admin [priv]"
             │ └─76149 "sshd-session: ceph-admin@notty"
             ├─session-24.scope
             │ ├─76200 "sshd-session: ceph-admin [priv]"
             │ └─76203 "sshd-session: ceph-admin@notty"
             ├─session-25.scope
             │ ├─76254 "sshd-session: ceph-admin [priv]"
             │ └─76257 "sshd-session: ceph-admin@notty"
             ├─session-26.scope
             │ ├─76308 "sshd-session: ceph-admin [priv]"
             │ └─76311 "sshd-session: ceph-admin@notty"
             ├─session-27.scope
             │ ├─76362 "sshd-session: ceph-admin [priv]"
             │ └─76365 "sshd-session: ceph-admin@notty"
             ├─session-28.scope
             │ ├─76416 "sshd-session: ceph-admin [priv]"
             │ └─76419 "sshd-session: ceph-admin@notty"
             ├─session-29.scope
             │ ├─76470 "sshd-session: ceph-admin [priv]"
             │ └─76473 "sshd-session: ceph-admin@notty"
             ├─session-30.scope
             │ ├─76524 "sshd-session: ceph-admin [priv]"
             │ └─76527 "sshd-session: ceph-admin@notty"
             ├─session-31.scope
             │ ├─76578 "sshd-session: ceph-admin [priv]"
             │ └─76581 "sshd-session: ceph-admin@notty"
             ├─session-32.scope
             │ ├─76605 "sshd-session: ceph-admin [priv]"
             │ └─76608 "sshd-session: ceph-admin@notty"
             ├─session-33.scope
             │ ├─76659 "sshd-session: ceph-admin [priv]"
             │ └─76662 "sshd-session: ceph-admin@notty"
             └─user@42477.service
               └─init.scope
                 ├─76130 /usr/lib/systemd/systemd --user
                 └─76132 "(sd-pam)"

Jan 31 09:21:12 compute-0 sudo[442762]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 09:21:12 compute-0 sudo[442732]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 09:21:12 compute-0 sudo[442762]: pam_unix(sudo:session): session closed for user root
Jan 31 09:21:12 compute-0 sudo[442732]: pam_unix(sudo:session): session closed for user root
Jan 31 09:21:13 compute-0 sudo[442811]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Jan 31 09:21:13 compute-0 sudo[442811]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 09:21:13 compute-0 sudo[442811]: pam_unix(sudo:session): session closed for user root
Jan 31 09:21:13 compute-0 sudo[442810]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 31 09:21:13 compute-0 sudo[442810]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 09:21:13 compute-0 sudo[442810]: pam_unix(sudo:session): session closed for user root

● user.slice - User and Session Slice
     Loaded: loaded (/usr/lib/systemd/system/user.slice; static)
     Active: active since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd.special(7)
         IO: 589.1M read, 9.0G written
      Tasks: 64
     Memory: 1.9G (peak: 4.8G)
        CPU: 31min 40.033s
     CGroup: /user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4520 /usr/bin/python3
             │ ├─session-67.scope
             │ │ ├─435980 "sshd-session: zuul [priv]"
             │ │ ├─435983 "sshd-session: zuul@notty"
             │ │ ├─435984 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─436008 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─442195 timeout 15s turbostat --debug sleep 10
             │ │ ├─442903 timeout 300s systemctl status --all
             │ │ ├─442904 systemctl status --all
             │ │ ├─442930 timeout 300s ceph fs ls --format json-pretty
             │ │ ├─442931 /usr/bin/python3 -s /usr/bin/ceph fs ls --format json-pretty
             │ │ ├─442951 timeout 300s semanage module -l
             │ │ └─442952 /usr/bin/python3 -EsI /usr/sbin/semanage module -l
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─12265 /usr/bin/dbus-broker-launch --scope user
             │   │   └─12293 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4308 /usr/lib/systemd/systemd --user
             │   │ └─4310 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-03c69239.scope
             │       └─12088 catatonit -P
             └─user-42477.slice
               ├─session-21.scope
               │ ├─76126 "sshd-session: ceph-admin [priv]"
               │ └─76148 "sshd-session: ceph-admin"
               ├─session-23.scope
               │ ├─76143 "sshd-session: ceph-admin [priv]"
               │ └─76149 "sshd-session: ceph-admin@notty"
               ├─session-24.scope
               │ ├─76200 "sshd-session: ceph-admin [priv]"
               │ └─76203 "sshd-session: ceph-admin@notty"
               ├─session-25.scope
               │ ├─76254 "sshd-session: ceph-admin [priv]"
               │ └─76257 "sshd-session: ceph-admin@notty"
               ├─session-26.scope
               │ ├─76308 "sshd-session: ceph-admin [priv]"
               │ └─76311 "sshd-session: ceph-admin@notty"
               ├─session-27.scope
               │ ├─76362 "sshd-session: ceph-admin [priv]"
               │ └─76365 "sshd-session: ceph-admin@notty"
               ├─session-28.scope
               │ ├─76416 "sshd-session: ceph-admin [priv]"
               │ └─76419 "sshd-session: ceph-admin@notty"
               ├─session-29.scope
               │ ├─76470 "sshd-session: ceph-admin [priv]"
               │ └─76473 "sshd-session: ceph-admin@notty"
               ├─session-30.scope
               │ ├─76524 "sshd-session: ceph-admin [priv]"
               │ └─76527 "sshd-session: ceph-admin@notty"
               ├─session-31.scope
               │ ├─76578 "sshd-session: ceph-admin [priv]"
               │ └─76581 "sshd-session: ceph-admin@notty"
               ├─session-32.scope
               │ ├─76605 "sshd-session: ceph-admin [priv]"
               │ └─76608 "sshd-session: ceph-admin@notty"
               ├─session-33.scope
               │ ├─76659 "sshd-session: ceph-admin [priv]"
               │ └─76662 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─76130 /usr/lib/systemd/systemd --user
                   └─76132 "(sd-pam)"

● dbus.socket - D-Bus System Message Bus Socket
     Loaded: loaded (/usr/lib/systemd/system/dbus.socket; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
   Triggers: ● dbus-broker.service
     Listen: /run/dbus/system_bus_socket (Stream)
     CGroup: /system.slice/dbus.socket

Jan 31 06:09:25 localhost systemd[1]: Listening on D-Bus System Message Bus Socket.

● dm-event.socket - Device-mapper event daemon FIFOs
     Loaded: loaded (/usr/lib/systemd/system/dm-event.socket; enabled; preset: enabled)
     Active: active (listening) since Sat 2026-01-31 06:57:50 UTC; 2h 23min ago
      Until: Sat 2026-01-31 06:57:50 UTC; 2h 23min ago
   Triggers: ● dm-event.service
       Docs: man:dmeventd(8)
     Listen: /run/dmeventd-server (FIFO)
             /run/dmeventd-client (FIFO)
     CGroup: /system.slice/dm-event.socket

Jan 31 06:57:50 compute-0 systemd[1]: Listening on Device-mapper event daemon FIFOs.

● iscsid.socket - Open-iSCSI iscsid Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsid.socket; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 07:23:18 UTC; 1h 57min ago
      Until: Sat 2026-01-31 07:23:18 UTC; 1h 57min ago
   Triggers: ● iscsid.service
       Docs: man:iscsid(8)
             man:iscsiadm(8)
     Listen: @ISCSIADM_ABSTRACT_NAMESPACE (Stream)
     CGroup: /system.slice/iscsid.socket

Jan 31 07:23:18 compute-0 systemd[1]: Listening on Open-iSCSI iscsid Socket.

○ iscsiuio.socket - Open-iSCSI iscsiuio Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● iscsiuio.service
       Docs: man:iscsiuio(8)
     Listen: @ISCSID_UIP_ABSTRACT_NAMESPACE (Stream)

○ libvirtd-admin.socket - libvirt legacy monolithic daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-admin.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)

○ libvirtd-ro.socket - libvirt legacy monolithic daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-ro.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)

○ libvirtd-tls.socket
     Loaded: masked (Reason: Unit libvirtd-tls.socket is masked.)
     Active: inactive (dead)

○ libvirtd.socket - libvirt legacy monolithic daemon socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock (Stream)

● lvm2-lvmpolld.socket - LVM2 poll daemon socket
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.socket; enabled; preset: enabled)
     Active: active (listening) since Sat 2026-01-31 06:57:50 UTC; 2h 23min ago
      Until: Sat 2026-01-31 06:57:50 UTC; 2h 23min ago
   Triggers: ● lvm2-lvmpolld.service
       Docs: man:lvmpolld(8)
     Listen: /run/lvm/lvmpolld.socket (Stream)
     CGroup: /system.slice/lvm2-lvmpolld.socket

Jan 31 06:57:50 compute-0 systemd[1]: Listening on LVM2 poll daemon socket.

● multipathd.socket - multipathd control socket
     Loaded: loaded (/usr/lib/systemd/system/multipathd.socket; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:23:45 UTC; 1h 57min ago
      Until: Sat 2026-01-31 07:23:45 UTC; 1h 57min ago
   Triggers: ● multipathd.service
     Listen: @/org/kernel/linux/storage/multipathd (Stream)
             /run/multipathd.socket (Stream)
     CGroup: /system.slice/multipathd.socket

Jan 31 07:23:45 compute-0 systemd[1]: Listening on multipathd control socket.

● rpcbind.socket - RPCbind Server Activation Socket
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.socket; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
   Triggers: ● rpcbind.service
     Listen: /run/rpcbind.sock (Stream)
             0.0.0.0:111 (Stream)
             0.0.0.0:111 (Datagram)
             [::]:111 (Stream)
             [::]:111 (Datagram)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 16.0K (peak: 288.0K)
        CPU: 4ms
     CGroup: /system.slice/rpcbind.socket

● sssd-kcm.socket - SSSD Kerberos Cache Manager responder socket
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.socket; enabled; preset: enabled)
     Active: active (listening) since Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
   Triggers: ● sssd-kcm.service
       Docs: man:sssd-kcm(8)
     Listen: /run/.heim_org.h5l.kcm-socket (Stream)
     CGroup: /system.slice/sssd-kcm.socket

Jan 31 06:09:25 localhost systemd[1]: Listening on SSSD Kerberos Cache Manager responder socket.

○ syslog.socket - Syslog Socket
     Loaded: loaded (/usr/lib/systemd/system/syslog.socket; static)
     Active: inactive (dead)
   Triggers: ● syslog.service
       Docs: man:systemd.special(7)
             https://www.freedesktop.org/wiki/Software/systemd/syslog
     Listen: /run/systemd/journal/syslog (Datagram)

● systemd-coredump.socket - Process Core Dump Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-coredump.socket; static)
     Active: active (listening) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd-coredump(8)
     Listen: /run/systemd/coredump (SequentialPacket)
   Accepted: 0; Connected: 0;
     CGroup: /system.slice/systemd-coredump.socket

● systemd-initctl.socket - initctl Compatibility Named Pipe
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.socket; static)
     Active: active (listening) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
   Triggers: ● systemd-initctl.service
       Docs: man:systemd-initctl.socket(8)
     Listen: /run/initctl (FIFO)
     CGroup: /system.slice/systemd-initctl.socket

● systemd-journald-dev-log.socket - Journal Socket (/dev/log)
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald-dev-log.socket; static)
     Active: active (running) since Sat 2026-01-31 06:09:18 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:18 UTC; 3h 11min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/dev-log (Datagram)
     CGroup: /system.slice/systemd-journald-dev-log.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-journald.socket - Journal Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.socket; static)
     Active: active (running) since Sat 2026-01-31 06:09:18 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:18 UTC; 3h 11min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/socket (Datagram)
             /run/systemd/journal/stdout (Stream)
     CGroup: /system.slice/systemd-journald.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-rfkill.socket - Load/Save RF Kill Switch Status /dev/rfkill Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.socket; static)
     Active: active (listening) since Sat 2026-01-31 06:09:27 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:27 UTC; 3h 11min ago
   Triggers: ● systemd-rfkill.service
       Docs: man:systemd-rfkill.socket(8)
     Listen: /dev/rfkill (Special)
     CGroup: /system.slice/systemd-rfkill.socket

Jan 31 06:09:27 np0005603608.novalocal systemd[1]: Listening on Load/Save RF Kill Switch Status /dev/rfkill Watch.

● systemd-udevd-control.socket - udev Control Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-control.socket; static)
     Active: active (running) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-control.socket(8)
             man:udev(7)
     Listen: /run/udev/control (SequentialPacket)
     CGroup: /system.slice/systemd-udevd-control.socket

● systemd-udevd-kernel.socket - udev Kernel Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-kernel.socket; static)
     Active: active (running) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-kernel.socket(8)
             man:udev(7)
     Listen: kobject-uevent 1 (Netlink)
     CGroup: /system.slice/systemd-udevd-kernel.socket

○ virtinterfaced-admin.socket - libvirt interface daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-admin-sock (Stream)

○ virtinterfaced-ro.socket - libvirt interface daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock-ro (Stream)

○ virtinterfaced.socket - libvirt interface daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock (Stream)

○ virtlockd-admin.socket - libvirt locking daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-admin-sock (Stream)

● virtlockd.socket - libvirt locking daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.socket; enabled; preset: enabled)
     Active: active (listening) since Sat 2026-01-31 07:22:11 UTC; 1h 59min ago
      Until: Sat 2026-01-31 07:22:11 UTC; 1h 59min ago
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-sock (Stream)
     CGroup: /system.slice/virtlockd.socket

Jan 31 07:22:11 compute-0 systemd[1]: Listening on libvirt locking daemon socket.

● virtlogd-admin.socket - libvirt logging daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd-admin.socket.d
             └─override.conf
     Active: active (running) since Sat 2026-01-31 07:22:08 UTC; 1h 59min ago
      Until: Sat 2026-01-31 07:22:08 UTC; 1h 59min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 4ms
     CGroup: /system.slice/virtlogd-admin.socket

Jan 31 07:22:08 compute-0 systemd[1]: Starting libvirt logging daemon admin socket...
Jan 31 07:22:08 compute-0 systemd[1]: Listening on libvirt logging daemon admin socket.

● virtlogd.socket - libvirt logging daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd.socket.d
             └─override.conf
     Active: active (running) since Sat 2026-01-31 07:22:08 UTC; 1h 59min ago
      Until: Sat 2026-01-31 07:22:08 UTC; 1h 59min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 4ms
     CGroup: /system.slice/virtlogd.socket

Jan 31 07:22:08 compute-0 systemd[1]: Starting libvirt logging daemon socket...
Jan 31 07:22:08 compute-0 systemd[1]: Listening on libvirt logging daemon socket.

○ virtnetworkd-admin.socket - libvirt network daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-admin-sock (Stream)

○ virtnetworkd-ro.socket - libvirt network daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock-ro (Stream)

○ virtnetworkd.socket - libvirt network daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock (Stream)

● virtnodedevd-admin.socket - libvirt nodedev daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-admin.socket.d
             └─override.conf
     Active: active (running) since Sat 2026-01-31 07:22:09 UTC; 1h 59min ago
      Until: Sat 2026-01-31 07:22:09 UTC; 1h 59min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtnodedevd-admin.socket

Jan 31 07:22:09 compute-0 systemd[1]: Starting libvirt nodedev daemon admin socket...
Jan 31 07:22:09 compute-0 systemd[1]: Listening on libvirt nodedev daemon admin socket.

● virtnodedevd-ro.socket - libvirt nodedev daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-ro.socket.d
             └─override.conf
     Active: active (running) since Sat 2026-01-31 07:22:09 UTC; 1h 59min ago
      Until: Sat 2026-01-31 07:22:09 UTC; 1h 59min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 4ms
     CGroup: /system.slice/virtnodedevd-ro.socket

Jan 31 07:22:09 compute-0 systemd[1]: Starting libvirt nodedev daemon read-only socket...
Jan 31 07:22:09 compute-0 systemd[1]: Listening on libvirt nodedev daemon read-only socket.

● virtnodedevd.socket - libvirt nodedev daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd.socket.d
             └─override.conf
     Active: active (running) since Sat 2026-01-31 07:22:09 UTC; 1h 59min ago
      Until: Sat 2026-01-31 07:22:09 UTC; 1h 59min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtnodedevd.socket

Jan 31 07:22:09 compute-0 systemd[1]: Starting libvirt nodedev daemon socket...
Jan 31 07:22:09 compute-0 systemd[1]: Listening on libvirt nodedev daemon socket.

○ virtnwfilterd-admin.socket - libvirt nwfilter daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-admin-sock (Stream)

○ virtnwfilterd-ro.socket - libvirt nwfilter daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock-ro (Stream)

○ virtnwfilterd.socket - libvirt nwfilter daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock (Stream)

● virtproxyd-admin.socket - libvirt proxy daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-admin.socket.d
             └─override.conf
     Active: active (listening) since Sat 2026-01-31 07:22:10 UTC; 1h 59min ago
      Until: Sat 2026-01-31 07:22:10 UTC; 1h 59min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 568.0K)
        CPU: 2ms
     CGroup: /system.slice/virtproxyd-admin.socket

Jan 31 07:22:10 compute-0 systemd[1]: Starting libvirt proxy daemon admin socket...
Jan 31 07:22:10 compute-0 systemd[1]: Listening on libvirt proxy daemon admin socket.

● virtproxyd-ro.socket - libvirt proxy daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-ro.socket.d
             └─override.conf
     Active: active (listening) since Sat 2026-01-31 07:22:10 UTC; 1h 59min ago
      Until: Sat 2026-01-31 07:22:10 UTC; 1h 59min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 1ms
     CGroup: /system.slice/virtproxyd-ro.socket

Jan 31 07:22:10 compute-0 systemd[1]: Starting libvirt proxy daemon read-only socket...
Jan 31 07:22:10 compute-0 systemd[1]: Listening on libvirt proxy daemon read-only socket.

● virtproxyd-tls.socket - libvirt proxy daemon TLS IP socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-tls.socket; enabled; preset: disabled)
     Active: active (listening) since Sat 2026-01-31 07:20:58 UTC; 2h 0min ago
      Until: Sat 2026-01-31 07:20:58 UTC; 2h 0min ago
   Triggers: ● virtproxyd.service
     Listen: [::]:16514 (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 8.0K (peak: 256.0K)
        CPU: 1ms
     CGroup: /system.slice/virtproxyd-tls.socket

Jan 31 07:20:58 compute-0 systemd[1]: Listening on libvirt proxy daemon TLS IP socket.

● virtproxyd.socket - libvirt proxy daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd.socket.d
             └─override.conf
     Active: active (listening) since Sat 2026-01-31 07:20:58 UTC; 2h 0min ago
      Until: Sat 2026-01-31 07:20:58 UTC; 2h 0min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock (Stream)
     CGroup: /system.slice/virtproxyd.socket

Jan 31 07:20:58 compute-0 systemd[1]: Listening on libvirt proxy daemon socket.

● virtqemud-admin.socket - libvirt QEMU daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-admin.socket.d
             └─override.conf
     Active: active (running) since Sat 2026-01-31 07:22:11 UTC; 1h 59min ago
      Until: Sat 2026-01-31 07:22:11 UTC; 1h 59min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 5ms
     CGroup: /system.slice/virtqemud-admin.socket

Jan 31 07:22:11 compute-0 systemd[1]: Starting libvirt QEMU daemon admin socket...
Jan 31 07:22:11 compute-0 systemd[1]: Listening on libvirt QEMU daemon admin socket.

● virtqemud-ro.socket - libvirt QEMU daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-ro.socket.d
             └─override.conf
     Active: active (running) since Sat 2026-01-31 07:22:11 UTC; 1h 59min ago
      Until: Sat 2026-01-31 07:22:11 UTC; 1h 59min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 504.0K)
        CPU: 4ms
     CGroup: /system.slice/virtqemud-ro.socket

Jan 31 07:22:11 compute-0 systemd[1]: Starting libvirt QEMU daemon read-only socket...
Jan 31 07:22:11 compute-0 systemd[1]: Listening on libvirt QEMU daemon read-only socket.

● virtqemud.socket - libvirt QEMU daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud.socket.d
             └─override.conf
     Active: active (running) since Sat 2026-01-31 07:22:11 UTC; 1h 59min ago
      Until: Sat 2026-01-31 07:22:11 UTC; 1h 59min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 5ms
     CGroup: /system.slice/virtqemud.socket

Jan 31 07:22:11 compute-0 systemd[1]: Starting libvirt QEMU daemon socket...
Jan 31 07:22:11 compute-0 systemd[1]: Listening on libvirt QEMU daemon socket.

● virtsecretd-admin.socket - libvirt secret daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-admin.socket.d
             └─override.conf
     Active: active (running) since Sat 2026-01-31 07:22:12 UTC; 1h 59min ago
      Until: Sat 2026-01-31 07:22:12 UTC; 1h 59min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtsecretd-admin.socket

Jan 31 07:22:12 compute-0 systemd[1]: Starting libvirt secret daemon admin socket...
Jan 31 07:22:12 compute-0 systemd[1]: Listening on libvirt secret daemon admin socket.

● virtsecretd-ro.socket - libvirt secret daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-ro.socket.d
             └─override.conf
     Active: active (running) since Sat 2026-01-31 07:22:12 UTC; 1h 59min ago
      Until: Sat 2026-01-31 07:22:12 UTC; 1h 59min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtsecretd-ro.socket

Jan 31 07:22:12 compute-0 systemd[1]: Starting libvirt secret daemon read-only socket...
Jan 31 07:22:12 compute-0 systemd[1]: Listening on libvirt secret daemon read-only socket.

● virtsecretd.socket - libvirt secret daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd.socket.d
             └─override.conf
     Active: active (running) since Sat 2026-01-31 07:22:12 UTC; 1h 59min ago
      Until: Sat 2026-01-31 07:22:12 UTC; 1h 59min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 4ms
     CGroup: /system.slice/virtsecretd.socket

Jan 31 07:22:12 compute-0 systemd[1]: Starting libvirt secret daemon socket...
Jan 31 07:22:12 compute-0 systemd[1]: Listening on libvirt secret daemon socket.

○ virtstoraged-admin.socket - libvirt storage daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-admin-sock (Stream)

○ virtstoraged-ro.socket - libvirt storage daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock-ro (Stream)

○ virtstoraged.socket - libvirt storage daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock (Stream)

● swap.swap - /swap
     Loaded: loaded (/etc/fstab; generated)
     Active: active since Sat 2026-01-31 06:59:43 UTC; 2h 21min ago
      Until: Sat 2026-01-31 06:59:43 UTC; 2h 21min ago
       What: /swap
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● basic.target - Basic System
     Loaded: loaded (/usr/lib/systemd/system/basic.target; static)
     Active: active since Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

Jan 31 06:09:25 localhost systemd[1]: Reached target Basic System.

○ blockdev@dev-disk-by\x2duuid-822f14ea\x2d6e7e\x2d41df\x2db0d8\x2dfbe282d9ded8.target - Block Device Preparation for /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ blockdev@dev-vda1.target - Block Device Preparation for /dev/vda1
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b.target - Ceph cluster f70fcd2a-dcb4-5f89-a4ba-79a09959083b
     Loaded: loaded (/etc/systemd/system/ceph-f70fcd2a-dcb4-5f89-a4ba-79a09959083b.target; enabled; preset: disabled)
     Active: active since Sat 2026-01-31 07:04:30 UTC; 2h 16min ago
      Until: Sat 2026-01-31 07:04:30 UTC; 2h 16min ago

Jan 31 07:04:30 compute-0 systemd[1]: Reached target Ceph cluster f70fcd2a-dcb4-5f89-a4ba-79a09959083b.

● ceph.target - All Ceph clusters and services
     Loaded: loaded (/etc/systemd/system/ceph.target; enabled; preset: disabled)
     Active: active since Sat 2026-01-31 07:04:30 UTC; 2h 16min ago
      Until: Sat 2026-01-31 07:04:30 UTC; 2h 16min ago

Jan 31 07:04:30 compute-0 systemd[1]: Reached target All Ceph clusters and services.

● cloud-config.target - Cloud-config availability
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.target; static)
     Active: active since Sat 2026-01-31 06:09:34 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:34 UTC; 3h 11min ago

Jan 31 06:09:34 np0005603608.novalocal systemd[1]: Reached target Cloud-config availability.

● cloud-init.target - Cloud-init target
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.target; enabled-runtime; preset: disabled)
     Active: active since Sat 2026-01-31 06:09:35 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:35 UTC; 3h 11min ago

Jan 31 06:09:35 np0005603608.novalocal systemd[1]: Reached target Cloud-init target.

○ cryptsetup-pre.target - Local Encrypted Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● cryptsetup.target - Local Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup.target; static)
     Active: active since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

● edpm_libvirt.target
     Loaded: loaded (/etc/systemd/system/edpm_libvirt.target; static)
     Active: active since Sat 2026-01-31 07:22:48 UTC; 1h 58min ago
      Until: Sat 2026-01-31 07:22:48 UTC; 1h 58min ago

Jan 31 07:22:48 compute-0 systemd[1]: Reached target edpm_libvirt.target.

○ emergency.target - Emergency Mode
     Loaded: loaded (/usr/lib/systemd/system/emergency.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ first-boot-complete.target - First Boot Complete
     Loaded: loaded (/usr/lib/systemd/system/first-boot-complete.target; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

Jan 31 06:09:23 localhost systemd[1]: First Boot Complete was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ getty-pre.target - Preparation for Logins
     Loaded: loaded (/usr/lib/systemd/system/getty-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

● getty.target - Login Prompts
     Loaded: loaded (/usr/lib/systemd/system/getty.target; static)
     Active: active since Sat 2026-01-31 06:09:34 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:34 UTC; 3h 11min ago
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

Jan 31 06:09:34 np0005603608.novalocal systemd[1]: Reached target Login Prompts.

○ graphical.target - Graphical Interface
     Loaded: loaded (/usr/lib/systemd/system/graphical.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ initrd-fs.target - Initrd File Systems
     Loaded: loaded (/usr/lib/systemd/system/initrd-fs.target; static)
     Active: inactive (dead) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

Jan 31 06:09:21 localhost systemd[1]: Reached target Initrd File Systems.

○ initrd-root-device.target - Initrd Root Device
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-device.target; static)
     Active: inactive (dead) since Sat 2026-01-31 06:09:21 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

Jan 31 06:09:19 localhost systemd[1]: Reached target Initrd Root Device.
Jan 31 06:09:21 localhost systemd[1]: Stopped target Initrd Root Device.

○ initrd-root-fs.target - Initrd Root File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-fs.target; static)
     Active: inactive (dead) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

Jan 31 06:09:21 localhost systemd[1]: Reached target Initrd Root File System.

○ initrd-switch-root.target - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.target; static)
     Active: inactive (dead) since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago

Jan 31 06:09:21 localhost systemd[1]: Reached target Switch Root.

○ initrd-usr-fs.target - Initrd /usr File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-usr-fs.target; static)
     Active: inactive (dead) since Sat 2026-01-31 06:09:21 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

Jan 31 06:09:21 localhost systemd[1]: Stopped target Initrd /usr File System.
Notice: journal has been rotated since unit was started, output may be incomplete.

○ initrd.target - Initrd Default Target
     Loaded: loaded (/usr/lib/systemd/system/initrd.target; static)
     Active: inactive (dead) since Sat 2026-01-31 06:09:21 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

Jan 31 06:09:21 localhost systemd[1]: Reached target Initrd Default Target.
Jan 31 06:09:21 localhost systemd[1]: Stopped target Initrd Default Target.

● integritysetup.target - Local Integrity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/integritysetup.target; static)
     Active: active since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

● local-fs-pre.target - Preparation for Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs-pre.target; static)
     Active: active since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

Jan 31 06:09:23 localhost systemd[1]: Reached target Preparation for Local File Systems.

● local-fs.target - Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs.target; static)
     Active: active since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

Jan 31 06:09:23 localhost systemd[1]: Reached target Local File Systems.

● multi-user.target - Multi-User System
     Loaded: loaded (/usr/lib/systemd/system/multi-user.target; indirect; preset: disabled)
     Active: active since Sat 2026-01-31 06:09:34 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:34 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

Jan 31 06:09:34 np0005603608.novalocal systemd[1]: Reached target Multi-User System.

● network-online.target - Network is Online
     Loaded: loaded (/usr/lib/systemd/system/network-online.target; static)
     Active: active since Sat 2026-01-31 06:09:34 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:34 UTC; 3h 11min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Jan 31 06:09:34 np0005603608.novalocal systemd[1]: Reached target Network is Online.

● network-pre.target - Preparation for Network
     Loaded: loaded (/usr/lib/systemd/system/network-pre.target; static)
     Active: active since Sat 2026-01-31 06:09:27 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:27 UTC; 3h 11min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Jan 31 06:09:27 np0005603608.novalocal systemd[1]: Reached target Preparation for Network.

● network.target - Network
     Loaded: loaded (/usr/lib/systemd/system/network.target; static)
     Active: active since Sat 2026-01-31 06:09:27 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:27 UTC; 3h 11min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Jan 31 06:09:27 np0005603608.novalocal systemd[1]: Reached target Network.

● nfs-client.target - NFS client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-client.target; enabled; preset: disabled)
     Active: active since Sat 2026-01-31 06:09:27 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:27 UTC; 3h 11min ago

Jan 31 06:09:27 np0005603608.novalocal systemd[1]: Reached target NFS client services.

○ nss-lookup.target - Host and Network Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-lookup.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● nss-user-lookup.target - User and Group Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-user-lookup.target; static)
     Active: active since Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

Jan 31 06:09:25 localhost systemd[1]: Reached target User and Group Name Lookups.

● paths.target - Path Units
     Loaded: loaded (/usr/lib/systemd/system/paths.target; static)
     Active: active since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

○ remote-cryptsetup.target - Remote Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-cryptsetup.target; disabled; preset: enabled)
     Active: inactive (dead) since Sat 2026-01-31 06:09:21 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

Jan 31 06:09:20 localhost systemd[1]: Reached target Remote Encrypted Volumes.
Jan 31 06:09:21 localhost systemd[1]: Stopped target Remote Encrypted Volumes.

● remote-fs-pre.target - Preparation for Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs-pre.target; static)
     Active: active since Sat 2026-01-31 06:09:27 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:27 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

Jan 31 06:09:27 np0005603608.novalocal systemd[1]: Reached target Preparation for Remote File Systems.

● remote-fs.target - Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs.target; enabled; preset: enabled)
     Active: active since Sat 2026-01-31 06:09:27 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:27 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

Jan 31 06:09:27 np0005603608.novalocal systemd[1]: Reached target Remote File Systems.

○ remote-veritysetup.target - Remote Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-veritysetup.target; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ rescue.target - Rescue Mode
     Loaded: loaded (/usr/lib/systemd/system/rescue.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● rpc_pipefs.target
     Loaded: loaded (/usr/lib/systemd/system/rpc_pipefs.target; static)
     Active: active since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago

● rpcbind.target - RPC Port Mapper
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.target; static)
     Active: active since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd.special(7)
Unit syslog.target could not be found.

○ shutdown.target - System Shutdown
     Loaded: loaded (/usr/lib/systemd/system/shutdown.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● slices.target - Slice Units
     Loaded: loaded (/usr/lib/systemd/system/slices.target; static)
     Active: active since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

● sockets.target - Socket Units
     Loaded: loaded (/usr/lib/systemd/system/sockets.target; static)
     Active: active since Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

Jan 31 06:09:25 localhost systemd[1]: Reached target Socket Units.

● sshd-keygen.target
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen.target; static)
     Active: active since Sat 2026-01-31 07:20:41 UTC; 2h 0min ago
      Until: Sat 2026-01-31 07:20:41 UTC; 2h 0min ago

Jan 31 07:20:41 compute-0 systemd[1]: Reached target sshd-keygen.target.

● swap.target - Swaps
     Loaded: loaded (/usr/lib/systemd/system/swap.target; static)
     Active: active since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

● sysinit.target - System Initialization
     Loaded: loaded (/usr/lib/systemd/system/sysinit.target; static)
     Active: active since Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

Jan 31 06:09:25 localhost systemd[1]: Reached target System Initialization.

● time-set.target - System Time Set
     Loaded: loaded (/usr/lib/systemd/system/time-set.target; static)
     Active: active since Sat 2026-01-31 07:04:31 UTC; 2h 16min ago
      Until: Sat 2026-01-31 07:04:31 UTC; 2h 16min ago
       Docs: man:systemd.special(7)

Jan 31 07:04:31 compute-0 systemd[1]: Reached target System Time Set.

● time-sync.target - System Time Synchronized
     Loaded: loaded (/usr/lib/systemd/system/time-sync.target; static)
     Active: active since Sat 2026-01-31 07:04:31 UTC; 2h 16min ago
      Until: Sat 2026-01-31 07:04:31 UTC; 2h 16min ago
       Docs: man:systemd.special(7)

Jan 31 07:04:31 compute-0 systemd[1]: Reached target System Time Synchronized.

● timers.target - Timer Units
     Loaded: loaded (/usr/lib/systemd/system/timers.target; static)
     Active: active since Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

Jan 31 06:09:25 localhost systemd[1]: Reached target Timer Units.

○ umount.target - Unmount All Filesystems
     Loaded: loaded (/usr/lib/systemd/system/umount.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ veritysetup-pre.target - Local Verity Protected Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/veritysetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● veritysetup.target - Local Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/veritysetup.target; static)
     Active: active since Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:23 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

○ virt-guest-shutdown.target - Libvirt guests shutdown
     Loaded: loaded (/etc/systemd/system/virt-guest-shutdown.target; static)
     Active: inactive (dead)
       Docs: https://libvirt.org

● 1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238-518afe6e7402b380.timer - /usr/bin/podman healthcheck run 1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238
     Loaded: loaded (/run/systemd/transient/1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238-518afe6e7402b380.timer; transient)
  Transient: yes
     Active: active (waiting) since Sat 2026-01-31 07:16:32 UTC; 2h 4min ago
      Until: Sat 2026-01-31 07:16:32 UTC; 2h 4min ago
    Trigger: Sat 2026-01-31 09:21:37 UTC; 23s left
   Triggers: ● 1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238-518afe6e7402b380.service

Jan 31 07:16:32 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 1e25cafb71c13c532a40aebd7c2342bf479b9dc985b78d8189396343297af238.

● 4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1-61765080bc59b28e.timer - /usr/bin/podman healthcheck run 4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1
     Loaded: loaded (/run/systemd/transient/4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1-61765080bc59b28e.timer; transient)
  Transient: yes
     Active: active (waiting) since Sat 2026-01-31 07:18:08 UTC; 2h 3min ago
      Until: Sat 2026-01-31 07:18:08 UTC; 2h 3min ago
    Trigger: Sat 2026-01-31 09:21:24 UTC; 10s left
   Triggers: ● 4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1-61765080bc59b28e.service

Jan 31 07:18:08 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 4f21208ce36b9cf814979a8ea23334150fc70d101a3b284bd29d2af80b4ea3b1.

● dnf-makecache.timer - dnf makecache --timer
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.timer; enabled; preset: enabled)
     Active: active (waiting) since Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
    Trigger: Sat 2026-01-31 09:23:48 UTC; 2min 33s left
   Triggers: ● dnf-makecache.service

Jan 31 06:09:25 localhost systemd[1]: Started dnf makecache --timer.

● logrotate.timer - Daily rotation of log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.timer; enabled; preset: enabled)
     Active: active (waiting) since Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
    Trigger: Sun 2026-02-01 00:00:00 UTC; 14h left
   Triggers: ● logrotate.service
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

Jan 31 06:09:25 localhost systemd[1]: Started Daily rotation of log files.

○ raid-check.timer - Weekly RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.timer; enabled; preset: enabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● raid-check.service

○ sysstat-collect.timer - Run system activity accounting tool every 10 minutes
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-collect.service

○ sysstat-summary.timer - Generate summary of yesterday's process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-summary.service

● systemd-tmpfiles-clean.timer - Daily Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.timer; static)
     Active: active (waiting) since Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:09:25 UTC; 3h 11min ago
    Trigger: Sun 2026-02-01 06:24:48 UTC; 21h left
   Triggers: ● systemd-tmpfiles-clean.service
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)

Jan 31 06:09:25 localhost systemd[1]: Started Daily Cleanup of Temporary Directories.

● unbound-anchor.timer - daily update of the root trust anchor for DNSSEC
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.timer; enabled; preset: enabled)
     Active: active (waiting) since Sat 2026-01-31 07:00:16 UTC; 2h 20min ago
      Until: Sat 2026-01-31 07:00:16 UTC; 2h 20min ago
    Trigger: Sun 2026-02-01 00:00:00 UTC; 14h left
   Triggers: ● unbound-anchor.service
       Docs: man:unbound-anchor(8)

Jan 31 07:00:16 compute-0 systemd[1]: Started daily update of the root trust anchor for DNSSEC.
