● compute-0
    State: running
    Units: 475 loaded (incl. loaded aliases)
     Jobs: 0 queued
   Failed: 0 units
    Since: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
  systemd: 252-64.el9
   CGroup: /
           ├─293712 turbostat --debug sleep 10
           ├─293719 sleep 10
           ├─init.scope
           │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
           ├─machine.slice
           │ ├─libpod-dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059.scope
           │ │ └─container
           │ │   ├─145058 dumb-init --single-child -- kolla_start
           │ │   └─145061 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
           │ ├─libpod-e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07.scope
           │ │ └─container
           │ │   ├─238885 dumb-init --single-child -- kolla_start
           │ │   ├─238887 /usr/bin/python3 /usr/bin/nova-compute
           │ │   ├─245133 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmp8j6j0ldg/privsep.sock
           │ │   └─249642 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmpney6i09_/privsep.sock
           │ └─libpod-f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9.scope
           │   └─container
           │     ├─155008 dumb-init --single-child -- kolla_start
           │     ├─155011 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │     ├─155570 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │     ├─155575 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpsq_kuak1/privsep.sock
           │     ├─245329 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpk6kanhyo/privsep.sock
           │     └─245414 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpuet8pyad/privsep.sock
           ├─system.slice
           │ ├─NetworkManager.service
           │ │ └─49093 /usr/sbin/NetworkManager --no-daemon
           │ ├─auditd.service
           │ │ ├─704 /sbin/auditd
           │ │ └─706 /usr/sbin/sedispatch
           │ ├─chronyd.service
           │ │ └─58660 /usr/sbin/chronyd -F 2
           │ ├─crond.service
           │ │ ├─1013 /usr/sbin/crond -n
           │ │ └─7490 /usr/sbin/anacron -s
           │ ├─dbus-broker.service
           │ │ ├─768 /usr/bin/dbus-broker-launch --scope system --audit
           │ │ └─775 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
           │ ├─edpm_nova_compute.service
           │ │ └─238883 /usr/bin/conmon --api-version 1 -c e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07 -u e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07/userdata -p /run/containers/storage/overlay-containers/e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07
           │ ├─edpm_ovn_controller.service
           │ │ └─145056 /usr/bin/conmon --api-version 1 -c dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059 -u dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059/userdata -p /run/containers/storage/overlay-containers/dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059
           │ ├─edpm_ovn_metadata_agent.service
           │ │ └─155006 /usr/bin/conmon --api-version 1 -c f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9 -u f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9/userdata -p /run/containers/storage/overlay-containers/f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9
           │ ├─gssproxy.service
           │ │ └─876 /usr/sbin/gssproxy -D
           │ ├─irqbalance.service
           │ │ └─782 /usr/sbin/irqbalance
           │ ├─iscsid.service
           │ │ └─223533 /usr/sbin/iscsid -f
           │ ├─multipathd.service
           │ │ └─223815 /sbin/multipathd -d -s
           │ ├─ovs-vswitchd.service
           │ │ └─47398 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
           │ ├─ovsdb-server.service
           │ │ └─47317 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
           │ ├─polkit.service
           │ │ └─43582 /usr/lib/polkit-1/polkitd --no-debug
           │ ├─rpcbind.service
           │ │ └─702 /usr/bin/rpcbind -w -f
           │ ├─rsyslog.service
           │ │ └─1009 /usr/sbin/rsyslogd -n
           │ ├─sshd.service
           │ │ └─181254 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
           │ ├─system-ceph\x2d4548a36b\x2d7cdc\x2d5e3e\x2da814\x2d4e1571be1fae.slice
           │ │ ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@crash.compute-0.service
           │ │ │ ├─libpod-payload-61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0
           │ │ │ │ ├─80410 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
           │ │ │ │ └─80412 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
           │ │ │ └─runtime
           │ │ │   └─80408 /usr/bin/conmon --api-version 1 -c 61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0 -u 61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0/userdata -p /run/containers/storage/overlay-containers/61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0
           │ │ ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mds.cephfs.compute-0.mldrue.service
           │ │ │ ├─libpod-payload-2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409
           │ │ │ │ ├─95496 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.mldrue -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─95505 /usr/bin/ceph-mds -n mds.cephfs.compute-0.mldrue -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─95494 /usr/bin/conmon --api-version 1 -c 2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409 -u 2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409/userdata -p /run/containers/storage/overlay-containers/2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-mds-cephfs-compute-0-mldrue --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mds.cephfs.compute-0.mldrue.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409
           │ │ ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mgr.compute-0.twcemg.service
           │ │ │ ├─libpod-payload-e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3
           │ │ │ │ ├─75556 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.twcemg -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─75558 /usr/bin/ceph-mgr -n mgr.compute-0.twcemg -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─75554 /usr/bin/conmon --api-version 1 -c e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3 -u e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3/userdata -p /run/containers/storage/overlay-containers/e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-mgr-compute-0-twcemg --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mgr.compute-0.twcemg.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3
           │ │ ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mon.compute-0.service
           │ │ │ ├─libpod-payload-fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04
           │ │ │ │ ├─75269 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ │ └─75271 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─75267 /usr/bin/conmon --api-version 1 -c fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04 -u fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04/userdata -p /run/containers/storage/overlay-containers/fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04
           │ │ ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.0.service
           │ │ │ ├─libpod-payload-409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962
           │ │ │ │ ├─86142 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
           │ │ │ │ └─86144 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
           │ │ │ └─runtime
           │ │ │   └─86140 /usr/bin/conmon --api-version 1 -c 409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962 -u 409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962/userdata -p /run/containers/storage/overlay-containers/409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962
           │ │ ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.1.service
           │ │ │ ├─libpod-payload-8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0
           │ │ │ │ ├─87190 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
           │ │ │ │ └─87192 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
           │ │ │ └─runtime
           │ │ │   └─87188 /usr/bin/conmon --api-version 1 -c 8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0 -u 8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0/userdata -p /run/containers/storage/overlay-containers/8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0
           │ │ ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.2.service
           │ │ │ ├─libpod-payload-599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd
           │ │ │ │ ├─88234 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
           │ │ │ │ └─88236 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
           │ │ │ └─runtime
           │ │ │   └─88232 /usr/bin/conmon --api-version 1 -c 599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd -u 599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd/userdata -p /run/containers/storage/overlay-containers/599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd
           │ │ └─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@rgw.rgw.compute-0.ctqttb.service
           │ │   ├─libpod-payload-e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90
           │ │   │ ├─94941 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.ctqttb -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
           │ │   │ └─94979 /usr/bin/radosgw -n client.rgw.rgw.compute-0.ctqttb -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
           │ │   └─runtime
           │ │     └─94939 /usr/bin/conmon --api-version 1 -c e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90 -u e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90/userdata -p /run/containers/storage/overlay-containers/e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-rgw-rgw-compute-0-ctqttb --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@rgw.rgw.compute-0.ctqttb.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90
           │ ├─system-getty.slice
           │ │ └─getty@tty1.service
           │ │   └─1014 /sbin/agetty -o "-p -- \\u" --noclear - linux
           │ ├─system-serial\x2dgetty.slice
           │ │ └─serial-getty@ttyS0.service
           │ │   └─1015 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
           │ ├─systemd-hostnamed.service
           │ │ └─291011 /usr/lib/systemd/systemd-hostnamed
           │ ├─systemd-journald.service
           │ │ └─680 /usr/lib/systemd/systemd-journald
           │ ├─systemd-logind.service
           │ │ └─786 /usr/lib/systemd/systemd-logind
           │ ├─systemd-machined.service
           │ │ └─206973 /usr/lib/systemd/systemd-machined
           │ ├─systemd-udevd.service
           │ │ └─udev
           │ │   └─732 /usr/lib/systemd/systemd-udevd
           │ ├─tuned.service
           │ │ └─105965 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
           │ ├─virtlogd.service
           │ │ └─206344 /usr/sbin/virtlogd
           │ ├─virtnodedevd.service
           │ │ └─239207 /usr/sbin/virtnodedevd --timeout 120
           │ ├─virtqemud.service
           │ │ └─238654 /usr/sbin/virtqemud --timeout 120
           │ └─virtsecretd.service
           │   └─245219 /usr/sbin/virtsecretd --timeout 120
           └─user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4519 /usr/bin/python3
             │ ├─session-54.scope
             │ │ ├─287512 "sshd-session: zuul [priv]"
             │ │ ├─287564 "sshd-session: zuul@notty"
Unit boot.automount could not be found.
             │ │ ├─287565 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─287589 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─293711 timeout 15s turbostat --debug sleep 10
             │ │ ├─294029 timeout 300s ceph osd blocked-by --format json-pretty
             │ │ ├─294030 /usr/bin/python3 -s /usr/bin/ceph osd blocked-by --format json-pretty
             │ │ ├─294054 timeout 300s semanage node -l
             │ │ ├─294055 /usr/bin/python3 -EsI /usr/sbin/semanage node -l
             │ │ ├─294058 timeout 300s systemctl status --all
             │ │ └─294059 systemctl status --all
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─13969 /usr/bin/dbus-broker-launch --scope user
             │   │   └─13970 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4309 /usr/lib/systemd/systemd --user
             │   │ └─4311 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-2465a80e.scope
             │       └─13952 catatonit -P
             └─user-42477.slice
               ├─session-20.scope
               │ ├─76636 "sshd-session: ceph-admin [priv]"
               │ └─76657 "sshd-session: ceph-admin"
               ├─session-22.scope
               │ ├─76656 "sshd-session: ceph-admin [priv]"
               │ └─76660 "sshd-session: ceph-admin@notty"
               ├─session-23.scope
               │ ├─76686 "sshd-session: ceph-admin [priv]"
               │ └─76689 "sshd-session: ceph-admin@notty"
               ├─session-24.scope
               │ ├─76715 "sshd-session: ceph-admin [priv]"
               │ └─76718 "sshd-session: ceph-admin@notty"
               ├─session-25.scope
               │ ├─76744 "sshd-session: ceph-admin [priv]"
               │ └─76747 "sshd-session: ceph-admin@notty"
               ├─session-26.scope
               │ ├─76773 "sshd-session: ceph-admin [priv]"
               │ └─76776 "sshd-session: ceph-admin@notty"
               ├─session-27.scope
               │ ├─76802 "sshd-session: ceph-admin [priv]"
               │ └─76805 "sshd-session: ceph-admin@notty"
               ├─session-28.scope
               │ ├─76831 "sshd-session: ceph-admin [priv]"
               │ └─76834 "sshd-session: ceph-admin@notty"
               ├─session-29.scope
               │ ├─76860 "sshd-session: ceph-admin [priv]"
               │ └─76863 "sshd-session: ceph-admin@notty"
               ├─session-30.scope
               │ ├─76889 "sshd-session: ceph-admin [priv]"
               │ └─76892 "sshd-session: ceph-admin@notty"
               ├─session-31.scope
               │ ├─76916 "sshd-session: ceph-admin [priv]"
               │ └─76919 "sshd-session: ceph-admin@notty"
               ├─session-32.scope
               │ ├─76945 "sshd-session: ceph-admin [priv]"
               │ └─76948 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─76640 /usr/lib/systemd/systemd --user
                   └─76642 "(sd-pam)"

● proc-sys-fs-binfmt_misc.automount - Arbitrary Executable File Formats File System Automount Point
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.automount; static)
     Active: active (running) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
   Triggers: ● proc-sys-fs-binfmt_misc.mount
      Where: /proc/sys/fs/binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Feb 02 11:31:24 compute-0 systemd[1]: proc-sys-fs-binfmt_misc.automount: Got automount request for /proc/sys/fs/binfmt_misc, triggered by 77716 (sysctl)

● dev-cdrom.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ceph_vg0-ceph_lv0.device - /dev/ceph_vg0/ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:02 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:02 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-ceph_vg1-ceph_lv1.device - /dev/ceph_vg1/ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:06 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:06 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-ceph_vg2-ceph_lv2.device - /dev/ceph_vg2/ceph_lv2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:11 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:11 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2ddiskseq-1.device - /dev/disk/by-diskseq/1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2ddiskseq-11.device - /dev/disk/by-diskseq/11
    Follows: unit currently follows state of sys-devices-virtual-block-loop5.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:10 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:10 UTC; 54min ago
     Device: /sys/devices/virtual/block/loop5

● dev-disk-by\x2ddiskseq-12.device - /dev/disk/by-diskseq/12
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:11 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:11 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2ddiskseq-3.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2ddiskseq-5.device - /dev/disk/by-diskseq/5
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:01 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:01 UTC; 54min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2ddiskseq-6.device - /dev/disk/by-diskseq/6
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:02 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:02 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2ddiskseq-8.device - /dev/disk/by-diskseq/8
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:06 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:06 UTC; 54min ago
     Device: /sys/devices/virtual/block/loop4

● dev-disk-by\x2ddiskseq-9.device - /dev/disk/by-diskseq/9
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:06 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:06 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-ata\x2dQEMU_DVD\x2dROM_QM00001.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg0\x2dceph_lv0.device - /dev/disk/by-id/dm-name-ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:02 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:02 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg1\x2dceph_lv1.device - /dev/disk/by-id/dm-name-ceph_vg1-ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:06 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:06 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg2\x2dceph_lv2.device - /dev/disk/by-id/dm-name-ceph_vg2-ceph_lv2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:11 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:11 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2ddgAMjLsbpokakky5ll8XIIC6iySUZUOv3KHTourQZY5gdzxEUaaSB376szOSuddp.device - /dev/disk/by-id/dm-uuid-LVM-dgAMjLsbpokakky5ll8XIIC6iySUZUOv3KHTourQZY5gdzxEUaaSB376szOSuddp
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:06 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:06 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2ddVZP21Pe6nfQ8VQp9PK4zeWVWeeIxIrXdvAs6IYv3yKh6aWaaFFkK1HUdKMhaoM0.device - /dev/disk/by-id/dm-uuid-LVM-dVZP21Pe6nfQ8VQp9PK4zeWVWeeIxIrXdvAs6IYv3yKh6aWaaFFkK1HUdKMhaoM0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:02 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:02 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2dzw3nLDmVrBX1fw4VC8i1ZxmSmyoKuwKeK6jOCK7eci1XP3fGW31UXdky2hnJVIyw.device - /dev/disk/by-id/dm-uuid-LVM-zw3nLDmVrBX1fw4VC8i1ZxmSmyoKuwKeK6jOCK7eci1XP3fGW31UXdky2hnJVIyw
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:11 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:11 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dipmVQm\x2dVTKF\x2djWF0\x2dj1RU\x2dPVFK\x2djTyS\x2dwU6DHS.device - /dev/disk/by-id/lvm-pv-uuid-ipmVQm-VTKF-jWF0-j1RU-PVFK-jTyS-wU6DHS
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:06 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:06 UTC; 54min ago
     Device: /sys/devices/virtual/block/loop4

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dOujmde\x2dSwf6\x2dADJ4\x2drsku\x2dmCqV\x2dbivY\x2dGNvlyn.device - /dev/disk/by-id/lvm-pv-uuid-Oujmde-Swf6-ADJ4-rsku-mCqV-bivY-GNvlyn
    Follows: unit currently follows state of sys-devices-virtual-block-loop5.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:11 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:11 UTC; 54min ago
     Device: /sys/devices/virtual/block/loop5

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dvcIWnm\x2dzlSx\x2dbFAa\x2dPvR1\x2dEBVb\x2dO3ZT\x2dj9hsYb.device - /dev/disk/by-id/lvm-pv-uuid-vcIWnm-zlSx-bFAa-PvR1-EBVb-O3ZT-j9hsYb
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:02 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:02 UTC; 54min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2dlabel-config\x2d2.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpartuuid-714daac1\x2d01.device - /dev/disk/by-partuuid/714daac1-01
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0.device - /dev/disk/by-path/pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0.device - /dev/disk/by-path/virtio-pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/virtio-pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2duuid-2026\x2d02\x2d02\x2d10\x2d52\x2d18\x2d00.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2duuid-822f14ea\x2d6e7e\x2d41df\x2db0d8\x2dfbe282d9ded8.device - /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

Feb 02 10:52:36 localhost systemd[1]: Found device /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8.

● dev-dm\x2d0.device - /dev/dm-0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:02 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:02 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-dm\x2d1.device - /dev/dm-1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:06 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:06 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-dm\x2d2.device - /dev/dm-2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:11 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:11 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-loop3.device - /dev/loop3
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:01 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:01 UTC; 54min ago
     Device: /sys/devices/virtual/block/loop3

● dev-loop4.device - /dev/loop4
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:06 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:06 UTC; 54min ago
     Device: /sys/devices/virtual/block/loop4

● dev-loop5.device - /dev/loop5
    Follows: unit currently follows state of sys-devices-virtual-block-loop5.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:10 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:10 UTC; 54min ago
     Device: /sys/devices/virtual/block/loop5

● dev-mapper-ceph_vg0\x2dceph_lv0.device - /dev/mapper/ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:02 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:02 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-mapper-ceph_vg1\x2dceph_lv1.device - /dev/mapper/ceph_vg1-ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:06 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:06 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-mapper-ceph_vg2\x2dceph_lv2.device - /dev/mapper/ceph_vg2-ceph_lv2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:11 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:11 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-rfkill.device - /dev/rfkill
    Follows: unit currently follows state of sys-devices-virtual-misc-rfkill.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:41 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:41 UTC; 1h 32min ago
     Device: /sys/devices/virtual/misc/rfkill

● dev-sr0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ttyS0.device - /dev/ttyS0
    Follows: unit currently follows state of sys-devices-pnp0-00:00-tty-ttyS0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

Feb 02 10:52:39 localhost systemd[1]: Condition check resulted in /dev/ttyS0 being skipped.

● dev-ttyS1.device - /dev/ttyS1
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● dev-ttyS2.device - /dev/ttyS2
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS2.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● dev-ttyS3.device - /dev/ttyS3
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS3.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● dev-vda.device - /dev/vda
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-vda1.device - /dev/vda1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device - QEMU_DVD-ROM config-2
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● sys-devices-pci0000:00-0000:00:03.0-virtio1-net-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● sys-devices-pci0000:00-0000:00:07.0-virtio5-net-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:54:35 UTC; 1h 30min ago
      Until: Mon 2026-02-02 10:54:35 UTC; 1h 30min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-devices-platform-serial8250-tty-ttyS1.device - /sys/devices/platform/serial8250/tty/ttyS1
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● sys-devices-platform-serial8250-tty-ttyS2.device - /sys/devices/platform/serial8250/tty/ttyS2
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● sys-devices-platform-serial8250-tty-ttyS3.device - /sys/devices/platform/serial8250/tty/ttyS3
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● sys-devices-pnp0-00:00-tty-ttyS0.device - /sys/devices/pnp0/00:00/tty/ttyS0
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

● sys-devices-virtual-block-dm\x2d0.device - /sys/devices/virtual/block/dm-0
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:02 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:02 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-0

● sys-devices-virtual-block-dm\x2d1.device - /sys/devices/virtual/block/dm-1
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:06 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:06 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-1

● sys-devices-virtual-block-dm\x2d2.device - /sys/devices/virtual/block/dm-2
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:11 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:11 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-2

● sys-devices-virtual-block-loop3.device - /sys/devices/virtual/block/loop3
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:01 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:01 UTC; 54min ago
     Device: /sys/devices/virtual/block/loop3

● sys-devices-virtual-block-loop4.device - /sys/devices/virtual/block/loop4
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:06 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:06 UTC; 54min ago
     Device: /sys/devices/virtual/block/loop4

● sys-devices-virtual-block-loop5.device - /sys/devices/virtual/block/loop5
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:30:10 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:10 UTC; 54min ago
     Device: /sys/devices/virtual/block/loop5

● sys-devices-virtual-misc-rfkill.device - /sys/devices/virtual/misc/rfkill
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:41 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:41 UTC; 1h 32min ago
     Device: /sys/devices/virtual/misc/rfkill

● sys-devices-virtual-net-br\x2dex.device - /sys/devices/virtual/net/br-ex
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:27:13 UTC; 57min ago
      Until: Mon 2026-02-02 11:27:13 UTC; 57min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-devices-virtual-net-br\x2dint.device - /sys/devices/virtual/net/br-int
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:40:09 UTC; 44min ago
      Until: Mon 2026-02-02 11:40:09 UTC; 44min ago
     Device: /sys/devices/virtual/net/br-int

● sys-devices-virtual-net-genev_sys_6081.device - /sys/devices/virtual/net/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:40:09 UTC; 44min ago
      Until: Mon 2026-02-02 11:40:09 UTC; 44min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-devices-virtual-net-ovs\x2dsystem.device - /sys/devices/virtual/net/ovs-system
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:27:13 UTC; 57min ago
      Until: Mon 2026-02-02 11:27:13 UTC; 57min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-devices-virtual-net-vlan20.device - /sys/devices/virtual/net/vlan20
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:27:13 UTC; 57min ago
      Until: Mon 2026-02-02 11:27:13 UTC; 57min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-devices-virtual-net-vlan21.device - /sys/devices/virtual/net/vlan21
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:27:13 UTC; 57min ago
      Until: Mon 2026-02-02 11:27:13 UTC; 57min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-devices-virtual-net-vlan22.device - /sys/devices/virtual/net/vlan22
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:27:13 UTC; 57min ago
      Until: Mon 2026-02-02 11:27:13 UTC; 57min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-devices-virtual-net-vlan23.device - /sys/devices/virtual/net/vlan23
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:27:13 UTC; 57min ago
      Until: Mon 2026-02-02 11:27:13 UTC; 57min ago
     Device: /sys/devices/virtual/net/vlan23

● sys-module-configfs.device - /sys/module/configfs
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/module/configfs

● sys-module-fuse.device - /sys/module/fuse
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/module/fuse

● sys-subsystem-net-devices-br\x2dex.device - /sys/subsystem/net/devices/br-ex
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:27:13 UTC; 57min ago
      Until: Mon 2026-02-02 11:27:13 UTC; 57min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-subsystem-net-devices-br\x2dint.device - /sys/subsystem/net/devices/br-int
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:40:09 UTC; 44min ago
      Until: Mon 2026-02-02 11:40:09 UTC; 44min ago
     Device: /sys/devices/virtual/net/br-int

● sys-subsystem-net-devices-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-subsystem-net-devices-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 10:54:35 UTC; 1h 30min ago
      Until: Mon 2026-02-02 10:54:35 UTC; 1h 30min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-subsystem-net-devices-genev_sys_6081.device - /sys/subsystem/net/devices/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:40:09 UTC; 44min ago
      Until: Mon 2026-02-02 11:40:09 UTC; 44min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-subsystem-net-devices-ovs\x2dsystem.device - /sys/subsystem/net/devices/ovs-system
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:27:13 UTC; 57min ago
      Until: Mon 2026-02-02 11:27:13 UTC; 57min ago
     Device: /sys/devices/virtual/net/ovs-system

Unit boot.mount could not be found.
Unit home.mount could not be found.
● sys-subsystem-net-devices-vlan20.device - /sys/subsystem/net/devices/vlan20
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:27:13 UTC; 57min ago
      Until: Mon 2026-02-02 11:27:13 UTC; 57min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-subsystem-net-devices-vlan21.device - /sys/subsystem/net/devices/vlan21
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:27:13 UTC; 57min ago
      Until: Mon 2026-02-02 11:27:13 UTC; 57min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-subsystem-net-devices-vlan22.device - /sys/subsystem/net/devices/vlan22
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:27:13 UTC; 57min ago
      Until: Mon 2026-02-02 11:27:13 UTC; 57min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-subsystem-net-devices-vlan23.device - /sys/subsystem/net/devices/vlan23
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 11:27:13 UTC; 57min ago
      Until: Mon 2026-02-02 11:27:13 UTC; 57min ago
     Device: /sys/devices/virtual/net/vlan23

● -.mount - Root Mount
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted)
      Where: /
       What: /dev/vda1
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages.mount - Huge Pages File System
     Loaded: loaded (/usr/lib/systemd/system/dev-hugepages.mount; static)
     Active: active (mounted) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Where: /dev/hugepages
       What: hugetlbfs
       Docs: https://docs.kernel.org/admin-guide/mm/hugetlbpage.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 52.0K (peak: 556.0K)
        CPU: 6ms
     CGroup: /dev-hugepages.mount

Feb 02 10:52:38 localhost systemd[1]: Mounted Huge Pages File System.

● dev-hugepages1G.mount - /dev/hugepages1G
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Mon 2026-02-02 11:29:09 UTC; 55min ago
      Until: Mon 2026-02-02 11:29:09 UTC; 55min ago
      Where: /dev/hugepages1G
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages2M.mount - /dev/hugepages2M
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Mon 2026-02-02 11:29:10 UTC; 55min ago
      Until: Mon 2026-02-02 11:29:10 UTC; 55min ago
      Where: /dev/hugepages2M
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-mqueue.mount - POSIX Message Queue File System
     Loaded: loaded (/usr/lib/systemd/system/dev-mqueue.mount; static)
     Active: active (mounted) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Where: /dev/mqueue
       What: mqueue
       Docs: man:mq_overview(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 536.0K)
        CPU: 4ms
     CGroup: /dev-mqueue.mount

Feb 02 10:52:38 localhost systemd[1]: Mounted POSIX Message Queue File System.

○ proc-fs-nfsd.mount - NFSD configuration filesystem
     Loaded: loaded (/usr/lib/systemd/system/proc-fs-nfsd.mount; static)
     Active: inactive (dead)
      Where: /proc/fs/nfsd
       What: nfsd

● proc-sys-fs-binfmt_misc.mount - Arbitrary Executable File Formats File System
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.mount; disabled; preset: disabled)
     Active: active (mounted) since Mon 2026-02-02 11:31:24 UTC; 53min ago
      Until: Mon 2026-02-02 11:31:24 UTC; 53min ago
TriggeredBy: ● proc-sys-fs-binfmt_misc.automount
      Where: /proc/sys/fs/binfmt_misc
       What: binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 8.0K (peak: 548.0K)
        CPU: 7ms
     CGroup: /proc-sys-fs-binfmt_misc.mount

Feb 02 11:31:24 compute-0 systemd[1]: Mounting Arbitrary Executable File Formats File System...
Feb 02 11:31:24 compute-0 systemd[1]: Mounted Arbitrary Executable File Formats File System.

● run-credentials-systemd\x2dsysctl.service.mount - /run/credentials/systemd-sysctl.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 11:26:12 UTC; 58min ago
      Until: Mon 2026-02-02 11:26:12 UTC; 58min ago
      Where: /run/credentials/systemd-sysctl.service
       What: none

● run-credentials-systemd\x2dsysusers.service.mount - /run/credentials/systemd-sysusers.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Where: /run/credentials/systemd-sysusers.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup.service.mount - /run/credentials/systemd-tmpfiles-setup.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Where: /run/credentials/systemd-tmpfiles-setup.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup\x2ddev.service.mount - /run/credentials/systemd-tmpfiles-setup-dev.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Where: /run/credentials/systemd-tmpfiles-setup-dev.service
       What: none

● run-netns.mount - /run/netns
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 11:28:41 UTC; 56min ago
      Until: Mon 2026-02-02 11:28:41 UTC; 56min ago
      Where: /run/netns
       What: tmpfs

● run-user-1000.mount - /run/user/1000
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 10:52:58 UTC; 1h 31min ago
      Until: Mon 2026-02-02 10:52:58 UTC; 1h 31min ago
      Where: /run/user/1000
       What: tmpfs

● run-user-42477.mount - /run/user/42477
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 11:31:15 UTC; 53min ago
      Until: Mon 2026-02-02 11:31:15 UTC; 53min ago
      Where: /run/user/42477
       What: tmpfs

● sys-fs-fuse-connections.mount - FUSE Control File System
     Loaded: loaded (/usr/lib/systemd/system/sys-fs-fuse-connections.mount; static)
     Active: active (mounted) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Where: /sys/fs/fuse/connections
       What: fusectl
       Docs: https://docs.kernel.org/filesystems/fuse.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 3ms
     CGroup: /sys-fs-fuse-connections.mount

Feb 02 10:52:38 localhost systemd[1]: Mounting FUSE Control File System...
Feb 02 10:52:38 localhost systemd[1]: Mounted FUSE Control File System.

● sys-kernel-config.mount - Kernel Configuration File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Where: /sys/kernel/config
       What: configfs
       Docs: https://docs.kernel.org/filesystems/configfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● sys-kernel-debug-tracing.mount - /sys/kernel/debug/tracing
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 12:14:48 UTC; 10min ago
      Until: Mon 2026-02-02 12:14:48 UTC; 10min ago
      Where: /sys/kernel/debug/tracing
       What: tracefs

● sys-kernel-debug.mount - Kernel Debug File System
Unit sysroot.mount could not be found.
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-debug.mount; static)
     Active: active (mounted) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Where: /sys/kernel/debug
       What: debugfs
       Docs: https://docs.kernel.org/filesystems/debugfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 4ms
     CGroup: /sys-kernel-debug.mount

Feb 02 10:52:38 localhost systemd[1]: Mounted Kernel Debug File System.

● sys-kernel-tracing.mount - Kernel Trace File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-tracing.mount; static)
     Active: active (mounted) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Where: /sys/kernel/tracing
       What: tracefs
       Docs: https://docs.kernel.org/trace/ftrace.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 3ms
     CGroup: /sys-kernel-tracing.mount

Feb 02 10:52:38 localhost systemd[1]: Mounted Kernel Trace File System.

○ tmp.mount - Temporary Directory /tmp
     Loaded: loaded (/usr/lib/systemd/system/tmp.mount; disabled; preset: disabled)
     Active: inactive (dead)
      Where: /tmp
       What: tmpfs
       Docs: https://systemd.io/TEMPORARY_DIRECTORIES
             man:file-hierarchy(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● var-lib-containers-storage-overlay-16fb1364ed7b5d9d589b8cddf9155cefe5a84933a55b185272f06719082c72bc-merged.mount - /var/lib/containers/storage/overlay/16fb1364ed7b5d9d589b8cddf9155cefe5a84933a55b185272f06719082c72bc/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 11:32:00 UTC; 52min ago
      Until: Mon 2026-02-02 11:32:00 UTC; 52min ago
      Where: /var/lib/containers/storage/overlay/16fb1364ed7b5d9d589b8cddf9155cefe5a84933a55b185272f06719082c72bc/merged
       What: overlay

● var-lib-containers-storage-overlay-1a64f6f7b79961cf555e70f1e1cba521849f0814f4f805b139c10e64115ac7d0-merged.mount - /var/lib/containers/storage/overlay/1a64f6f7b79961cf555e70f1e1cba521849f0814f4f805b139c10e64115ac7d0/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 11:47:18 UTC; 37min ago
      Until: Mon 2026-02-02 11:47:18 UTC; 37min ago
      Where: /var/lib/containers/storage/overlay/1a64f6f7b79961cf555e70f1e1cba521849f0814f4f805b139c10e64115ac7d0/merged
       What: overlay

● var-lib-containers-storage-overlay-419fe5696336b414acb6689bf176f8fb4ea53130542549a4584ae7ea825cd301-merged.mount - /var/lib/containers/storage/overlay/419fe5696336b414acb6689bf176f8fb4ea53130542549a4584ae7ea825cd301/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 11:31:53 UTC; 52min ago
      Until: Mon 2026-02-02 11:31:53 UTC; 52min ago
      Where: /var/lib/containers/storage/overlay/419fe5696336b414acb6689bf176f8fb4ea53130542549a4584ae7ea825cd301/merged
       What: overlay

● var-lib-containers-storage-overlay-66828488d6a738970c08a788842396543666ffb837cadd7b3c21a2f6f66275e5-merged.mount - /var/lib/containers/storage/overlay/66828488d6a738970c08a788842396543666ffb837cadd7b3c21a2f6f66275e5/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 11:41:07 UTC; 43min ago
      Until: Mon 2026-02-02 11:41:07 UTC; 43min ago
      Where: /var/lib/containers/storage/overlay/66828488d6a738970c08a788842396543666ffb837cadd7b3c21a2f6f66275e5/merged
       What: overlay

● var-lib-containers-storage-overlay-7ac1f82a2d35df53911fd9c77142b2264c8717db91494bc4629e033e4a4a7809-merged.mount - /var/lib/containers/storage/overlay/7ac1f82a2d35df53911fd9c77142b2264c8717db91494bc4629e033e4a4a7809/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 11:31:57 UTC; 52min ago
      Until: Mon 2026-02-02 11:31:57 UTC; 52min ago
      Where: /var/lib/containers/storage/overlay/7ac1f82a2d35df53911fd9c77142b2264c8717db91494bc4629e033e4a4a7809/merged
       What: overlay

● var-lib-containers-storage-overlay-8422e6552c0ced62d9720d717acc360d2492d2fbf1e81dcfad6c88bbcd8d449c-merged.mount - /var/lib/containers/storage/overlay/8422e6552c0ced62d9720d717acc360d2492d2fbf1e81dcfad6c88bbcd8d449c/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 11:32:41 UTC; 52min ago
      Until: Mon 2026-02-02 11:32:41 UTC; 52min ago
      Where: /var/lib/containers/storage/overlay/8422e6552c0ced62d9720d717acc360d2492d2fbf1e81dcfad6c88bbcd8d449c/merged
       What: overlay

● var-lib-containers-storage-overlay-8f03d7f06f2160d053bffff722c287214a7ee4cb2856ca91947d8156b2bd569d-merged.mount - /var/lib/containers/storage/overlay/8f03d7f06f2160d053bffff722c287214a7ee4cb2856ca91947d8156b2bd569d/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 11:32:43 UTC; 52min ago
      Until: Mon 2026-02-02 11:32:43 UTC; 52min ago
      Where: /var/lib/containers/storage/overlay/8f03d7f06f2160d053bffff722c287214a7ee4cb2856ca91947d8156b2bd569d/merged
       What: overlay

● var-lib-containers-storage-overlay-90c8e788c6d04014e4e74a6c8045d0882f4eaf94f1f7d567d237c4e2fdca572c-merged.mount - /var/lib/containers/storage/overlay/90c8e788c6d04014e4e74a6c8045d0882f4eaf94f1f7d567d237c4e2fdca572c/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 11:30:53 UTC; 53min ago
      Until: Mon 2026-02-02 11:30:53 UTC; 53min ago
      Where: /var/lib/containers/storage/overlay/90c8e788c6d04014e4e74a6c8045d0882f4eaf94f1f7d567d237c4e2fdca572c/merged
       What: overlay

● var-lib-containers-storage-overlay-9853a0e16c6435719fe32bd61ebb93037a2d91a40d261a4f51ee7b08167f64f5-merged.mount - /var/lib/containers/storage/overlay/9853a0e16c6435719fe32bd61ebb93037a2d91a40d261a4f51ee7b08167f64f5/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 11:30:51 UTC; 53min ago
      Until: Mon 2026-02-02 11:30:51 UTC; 53min ago
      Where: /var/lib/containers/storage/overlay/9853a0e16c6435719fe32bd61ebb93037a2d91a40d261a4f51ee7b08167f64f5/merged
       What: overlay

● var-lib-containers-storage-overlay-9e0c70f67ce5d3d1e60f45fe4f202579317f457f6bff89c7a5fc4897816948b9-merged.mount - /var/lib/containers/storage/overlay/9e0c70f67ce5d3d1e60f45fe4f202579317f457f6bff89c7a5fc4897816948b9/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 11:31:31 UTC; 53min ago
      Until: Mon 2026-02-02 11:31:31 UTC; 53min ago
      Where: /var/lib/containers/storage/overlay/9e0c70f67ce5d3d1e60f45fe4f202579317f457f6bff89c7a5fc4897816948b9/merged
       What: overlay

● var-lib-containers-storage-overlay-fcd0b9c59c19976d8d1152cedbd6da157fc7947d760fae19b5fd07fe4dc07631-merged.mount - /var/lib/containers/storage/overlay/fcd0b9c59c19976d8d1152cedbd6da157fc7947d760fae19b5fd07fe4dc07631/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 11:40:08 UTC; 44min ago
      Until: Mon 2026-02-02 11:40:08 UTC; 44min ago
      Where: /var/lib/containers/storage/overlay/fcd0b9c59c19976d8d1152cedbd6da157fc7947d760fae19b5fd07fe4dc07631/merged
       What: overlay

● var-lib-containers-storage-overlay.mount - /var/lib/containers/storage/overlay
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 11:30:51 UTC; 53min ago
      Until: Mon 2026-02-02 11:30:51 UTC; 53min ago
      Where: /var/lib/containers/storage/overlay
       What: /dev/vda1

● var-lib-containers-storage-overlay\x2dcontainers-dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 11:40:08 UTC; 44min ago
      Until: Mon 2026-02-02 11:40:08 UTC; 44min ago
      Where: /var/lib/containers/storage/overlay-containers/dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 11:47:18 UTC; 37min ago
      Until: Mon 2026-02-02 11:47:18 UTC; 37min ago
      Where: /var/lib/containers/storage/overlay-containers/e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 11:41:07 UTC; 43min ago
      Until: Mon 2026-02-02 11:41:07 UTC; 43min ago
      Where: /var/lib/containers/storage/overlay-containers/f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9/userdata/shm
       What: shm

○ var-lib-machines.mount - Virtual Machine and Container Storage (Compatibility)
     Loaded: loaded (/usr/lib/systemd/system/var-lib-machines.mount; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 11:44:36 UTC; 40min ago
      Where: /var/lib/machines
       What: /var/lib/machines.raw

Feb 02 11:44:36 compute-0 systemd[1]: Virtual Machine and Container Storage (Compatibility) was skipped because of an unmet condition check (ConditionPathExists=/var/lib/machines.raw).

● var-lib-nfs-rpc_pipefs.mount - RPC Pipe File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Where: /var/lib/nfs/rpc_pipefs
       What: rpc_pipefs

● systemd-ask-password-console.path - Dispatch Password Requests to Console Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.path; static)
     Active: active (waiting) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
   Triggers: ● systemd-ask-password-console.service
       Docs: man:systemd-ask-password-console.path(8)

● systemd-ask-password-wall.path - Forward Password Requests to Wall Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.path; static)
     Active: active (waiting) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
   Triggers: ● systemd-ask-password-wall.service
       Docs: man:systemd-ask-password-wall.path(8)

● init.scope - System and Service Manager
     Loaded: loaded
  Transient: yes
     Active: active (running) since Mon 2026-02-02 10:52:35 UTC; 1h 32min ago
       Docs: man:systemd(1)
         IO: 896.0K read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 48.9M (peak: 68.0M)
        CPU: 1min 1.648s
     CGroup: /init.scope
             └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31

Feb 02 12:24:09 compute-0 systemd[1]: var-lib-containers-storage-overlay-5ed0001bc2e2cb2e19179ce2f6bcc066a8d965c602ba81a457e115e8cbd3b0cf-merged.mount: Deactivated successfully.
Feb 02 12:24:09 compute-0 systemd[1]: libpod-conmon-1ba1ee714d09a353f5a65858421b1cae55451ab980161c30b840ddbda2570181.scope: Deactivated successfully.
Feb 02 12:24:10 compute-0 systemd[1]: Started libpod-conmon-9f68c28eb02450c3fcbb3c231eb7d9458d1ce4bca9b4288ef57689472ceb0ddf.scope.
Feb 02 12:24:10 compute-0 systemd[1]: Started libcrun container.
Feb 02 12:24:11 compute-0 systemd[1]: libpod-9f68c28eb02450c3fcbb3c231eb7d9458d1ce4bca9b4288ef57689472ceb0ddf.scope: Deactivated successfully.
Feb 02 12:24:11 compute-0 systemd[1]: libpod-9f68c28eb02450c3fcbb3c231eb7d9458d1ce4bca9b4288ef57689472ceb0ddf.scope: Consumed 1.354s CPU time.
Feb 02 12:24:11 compute-0 systemd[1]: var-lib-containers-storage-overlay-e4cc7b5666d7e0a93bf541eecfb5edb4790b8e5813fc7478a8951e2f7b50513c-merged.mount: Deactivated successfully.
Feb 02 12:24:11 compute-0 systemd[1]: libpod-conmon-9f68c28eb02450c3fcbb3c231eb7d9458d1ce4bca9b4288ef57689472ceb0ddf.scope: Deactivated successfully.
Feb 02 12:24:33 compute-0 systemd[1]: Starting Hostname Service...
Feb 02 12:24:33 compute-0 systemd[1]: Started Hostname Service.

● libpod-dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059.scope.d
             └─dep.conf
     Active: active (running) since Mon 2026-02-02 11:40:08 UTC; 44min ago
         IO: 816.0K read, 4.0K written
      Tasks: 6 (limit: 4096)
     Memory: 14.7M (peak: 17.6M)
        CPU: 6.349s
     CGroup: /machine.slice/libpod-dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059.scope
             └─container
               ├─145058 dumb-init --single-child -- kolla_start
               └─145061 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt

Feb 02 11:40:08 compute-0 systemd[1]: Started libcrun container.

● libpod-e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 11:47:18 UTC; 37min ago
         IO: 36.3M read, 42.1M written
      Tasks: 27 (limit: 4096)
     Memory: 423.9M (peak: 502.4M)
        CPU: 2min 37.392s
     CGroup: /machine.slice/libpod-e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07.scope
             └─container
               ├─238885 dumb-init --single-child -- kolla_start
               ├─238887 /usr/bin/python3 /usr/bin/nova-compute
               ├─245133 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmp8j6j0ldg/privsep.sock
               └─249642 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmpney6i09_/privsep.sock

Feb 02 11:47:18 compute-0 systemd[1]: Started libcrun container.

● libpod-f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9.scope.d
             └─dep.conf
     Active: active (running) since Mon 2026-02-02 11:41:07 UTC; 43min ago
         IO: 34.7M read, 22.2M written
      Tasks: 11 (limit: 4096)
     Memory: 451.6M (peak: 499.9M)
        CPU: 39.664s
     CGroup: /machine.slice/libpod-f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9.scope
             └─container
               ├─155008 dumb-init --single-child -- kolla_start
               ├─155011 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─155570 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─155575 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpsq_kuak1/privsep.sock
               ├─245329 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpk6kanhyo/privsep.sock
               └─245414 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpuet8pyad/privsep.sock

Feb 02 12:10:28 compute-0 podman[270443]: 2026-02-02 12:10:28.736564458 +0000 UTC m=+0.058036572 container died 98eaaf6bdeb7a14b4bc96b00e3a062ea09a704a29c1d377b9ab71c74f55ea809 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-efa24ae1-9962-44ca-882a-8d146356fcca, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260127, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, org.label-schema.license=GPLv2, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, org.label-schema.schema-version=1.0)
Feb 02 12:10:28 compute-0 podman[270443]: 2026-02-02 12:10:28.783688693 +0000 UTC m=+0.105160807 container cleanup 98eaaf6bdeb7a14b4bc96b00e3a062ea09a704a29c1d377b9ab71c74f55ea809 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-efa24ae1-9962-44ca-882a-8d146356fcca, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260127)
Feb 02 12:10:28 compute-0 podman[270480]: 2026-02-02 12:10:28.862784686 +0000 UTC m=+0.051100100 container remove 98eaaf6bdeb7a14b4bc96b00e3a062ea09a704a29c1d377b9ab71c74f55ea809 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-efa24ae1-9962-44ca-882a-8d146356fcca, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, org.label-schema.build-date=20260127, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2)
Feb 02 12:12:27 compute-0 podman[272457]: 2026-02-02 12:12:27.029788769 +0000 UTC m=+0.048821267 container create 02a6c1f23f4427298a1f170a49b434ef7ecd02774ca000a52f312d4bc0bcd093 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-c59f5e49-0a3a-410a-8325-47d3dec9f7b5, org.label-schema.schema-version=1.0, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.build-date=20260127)
Feb 02 12:12:27 compute-0 podman[272457]: 2026-02-02 12:12:27.005532435 +0000 UTC m=+0.024564943 image pull 19964fda6b912d3d57e21b0bcc221725d936e513025030cb508474fe04b06af8 quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified
Feb 02 12:12:27 compute-0 podman[272457]: 2026-02-02 12:12:27.117220166 +0000 UTC m=+0.136252684 container init 02a6c1f23f4427298a1f170a49b434ef7ecd02774ca000a52f312d4bc0bcd093 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-c59f5e49-0a3a-410a-8325-47d3dec9f7b5, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.build-date=20260127, org.label-schema.schema-version=1.0)
Feb 02 12:12:27 compute-0 podman[272457]: 2026-02-02 12:12:27.122755115 +0000 UTC m=+0.141787613 container start 02a6c1f23f4427298a1f170a49b434ef7ecd02774ca000a52f312d4bc0bcd093 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-c59f5e49-0a3a-410a-8325-47d3dec9f7b5, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260127, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4)
Feb 02 12:13:32 compute-0 podman[273380]: 2026-02-02 12:13:32.78854001 +0000 UTC m=+0.049799623 container died 02a6c1f23f4427298a1f170a49b434ef7ecd02774ca000a52f312d4bc0bcd093 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-c59f5e49-0a3a-410a-8325-47d3dec9f7b5, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260127, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4)
Feb 02 12:13:32 compute-0 podman[273380]: 2026-02-02 12:13:32.835816455 +0000 UTC m=+0.097076058 container cleanup 02a6c1f23f4427298a1f170a49b434ef7ecd02774ca000a52f312d4bc0bcd093 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-c59f5e49-0a3a-410a-8325-47d3dec9f7b5, org.label-schema.build-date=20260127, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)
Feb 02 12:13:32 compute-0 podman[273422]: 2026-02-02 12:13:32.902597725 +0000 UTC m=+0.044573953 container remove 02a6c1f23f4427298a1f170a49b434ef7ecd02774ca000a52f312d4bc0bcd093 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-c59f5e49-0a3a-410a-8325-47d3dec9f7b5, io.buildah.version=1.41.3, org.label-schema.build-date=20260127, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, org.label-schema.schema-version=1.0)

● session-1.scope - Session 1 of User zuul
     Loaded: loaded (/run/systemd/transient/session-1.scope; transient)
  Transient: yes
     Active: active (abandoned) since Mon 2026-02-02 10:52:58 UTC; 1h 31min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 17.1M (peak: 39.0M)
        CPU: 1min 20.333s
     CGroup: /user.slice/user-1000.slice/session-1.scope
             └─4519 /usr/bin/python3

Feb 02 10:55:44 np0005604943.novalocal sudo[7371]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Feb 02 10:55:44 np0005604943.novalocal python3[7373]: ansible-ansible.legacy.stat Invoked with path=/etc/ci/env/networking-info.yml follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Feb 02 10:55:44 np0005604943.novalocal sudo[7371]: pam_unix(sudo:session): session closed for user root
Feb 02 10:55:44 np0005604943.novalocal sudo[7444]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-nyxmpvmyjbxsydjthdlqnzljnsmtyyqz ; OS_CLOUD=vexxhost /usr/bin/python3'
Feb 02 10:55:44 np0005604943.novalocal sudo[7444]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Feb 02 10:55:44 np0005604943.novalocal python3[7446]: ansible-ansible.legacy.copy Invoked with dest=/etc/ci/env/networking-info.yml owner=root group=root mode=0644 src=/home/zuul/.ansible/tmp/ansible-tmp-1770029744.3708994-267-192545410168167/source _original_basename=tmpzt68unfw follow=False checksum=e4c393ff94986f8a93327c2207a14275aca333c2 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Feb 02 10:55:44 np0005604943.novalocal sudo[7444]: pam_unix(sudo:session): session closed for user root
Feb 02 10:56:45 np0005604943.novalocal sshd-session[4318]: Received disconnect from 38.102.83.114 port 60470:11: disconnected by user
Feb 02 10:56:45 np0005604943.novalocal sshd-session[4318]: Disconnected from user zuul 38.102.83.114 port 60470
Feb 02 10:56:45 np0005604943.novalocal sshd-session[4305]: pam_unix(sshd:session): session closed for user zuul

● session-20.scope - Session 20 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-20.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 11:31:16 UTC; 53min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 1.3M)
        CPU: 114ms
     CGroup: /user.slice/user-42477.slice/session-20.scope
             ├─76636 "sshd-session: ceph-admin [priv]"
             └─76657 "sshd-session: ceph-admin"

Feb 02 11:31:16 compute-0 systemd[1]: Started Session 20 of User ceph-admin.

● session-22.scope - Session 22 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-22.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 11:31:16 UTC; 53min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.2M)
        CPU: 161ms
     CGroup: /user.slice/user-42477.slice/session-22.scope
             ├─76656 "sshd-session: ceph-admin [priv]"
             └─76660 "sshd-session: ceph-admin@notty"

Feb 02 11:31:16 compute-0 systemd[1]: Started Session 22 of User ceph-admin.
Feb 02 11:31:16 compute-0 sudo[76661]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Feb 02 11:31:16 compute-0 sudo[76661]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 11:31:16 compute-0 sudo[76661]: pam_unix(sudo:session): session closed for user root

● session-23.scope - Session 23 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-23.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 11:31:16 UTC; 53min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 6.1M)
        CPU: 168ms
     CGroup: /user.slice/user-42477.slice/session-23.scope
             ├─76686 "sshd-session: ceph-admin [priv]"
             └─76689 "sshd-session: ceph-admin@notty"

Feb 02 11:31:16 compute-0 systemd[1]: Started Session 23 of User ceph-admin.
Feb 02 11:31:16 compute-0 sudo[76690]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/4548a36b-7cdc-5e3e-a814-4e1571be1fae/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b --timeout 895 check-host --expect-hostname compute-0
Feb 02 11:31:16 compute-0 sudo[76690]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 11:31:16 compute-0 sudo[76690]: pam_unix(sudo:session): session closed for user root

● session-24.scope - Session 24 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-24.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 11:31:16 UTC; 53min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.1M)
        CPU: 126ms
     CGroup: /user.slice/user-42477.slice/session-24.scope
             ├─76715 "sshd-session: ceph-admin [priv]"
             └─76718 "sshd-session: ceph-admin@notty"

Feb 02 11:31:16 compute-0 systemd[1]: Started Session 24 of User ceph-admin.
Feb 02 11:31:16 compute-0 sudo[76719]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /var/lib/ceph/4548a36b-7cdc-5e3e-a814-4e1571be1fae/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b
Feb 02 11:31:16 compute-0 sudo[76719]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 11:31:16 compute-0 sudo[76719]: pam_unix(sudo:session): session closed for user root

● session-25.scope - Session 25 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-25.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 11:31:17 UTC; 53min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.2M)
        CPU: 114ms
     CGroup: /user.slice/user-42477.slice/session-25.scope
             ├─76744 "sshd-session: ceph-admin [priv]"
             └─76747 "sshd-session: ceph-admin@notty"

Feb 02 11:31:17 compute-0 systemd[1]: Started Session 25 of User ceph-admin.
Feb 02 11:31:17 compute-0 sudo[76748]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /var/lib/ceph/4548a36b-7cdc-5e3e-a814-4e1571be1fae
Feb 02 11:31:17 compute-0 sudo[76748]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 11:31:17 compute-0 sudo[76748]: pam_unix(sudo:session): session closed for user root

● session-26.scope - Session 26 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-26.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 11:31:17 UTC; 53min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.4M)
        CPU: 131ms
     CGroup: /user.slice/user-42477.slice/session-26.scope
             ├─76773 "sshd-session: ceph-admin [priv]"
             └─76776 "sshd-session: ceph-admin@notty"

Feb 02 11:31:17 compute-0 systemd[1]: Started Session 26 of User ceph-admin.
Feb 02 11:31:17 compute-0 sudo[76777]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /tmp/cephadm-4548a36b-7cdc-5e3e-a814-4e1571be1fae/var/lib/ceph/4548a36b-7cdc-5e3e-a814-4e1571be1fae
Feb 02 11:31:17 compute-0 sudo[76777]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 11:31:17 compute-0 sudo[76777]: pam_unix(sudo:session): session closed for user root

● session-27.scope - Session 27 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-27.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 11:31:17 UTC; 53min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 3.9M)
        CPU: 154ms
     CGroup: /user.slice/user-42477.slice/session-27.scope
             ├─76802 "sshd-session: ceph-admin [priv]"
             └─76805 "sshd-session: ceph-admin@notty"

Feb 02 11:31:17 compute-0 systemd[1]: Started Session 27 of User ceph-admin.
Feb 02 11:31:17 compute-0 sudo[76806]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/touch /tmp/cephadm-4548a36b-7cdc-5e3e-a814-4e1571be1fae/var/lib/ceph/4548a36b-7cdc-5e3e-a814-4e1571be1fae/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b.new
Feb 02 11:31:17 compute-0 sudo[76806]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 11:31:17 compute-0 sudo[76806]: pam_unix(sudo:session): session closed for user root

● session-28.scope - Session 28 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-28.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 11:31:18 UTC; 53min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 3.9M)
        CPU: 148ms
     CGroup: /user.slice/user-42477.slice/session-28.scope
             ├─76831 "sshd-session: ceph-admin [priv]"
             └─76834 "sshd-session: ceph-admin@notty"

Feb 02 11:31:18 compute-0 systemd[1]: Started Session 28 of User ceph-admin.
Feb 02 11:31:18 compute-0 sudo[76835]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chown -R ceph-admin /tmp/cephadm-4548a36b-7cdc-5e3e-a814-4e1571be1fae
Feb 02 11:31:18 compute-0 sudo[76835]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 11:31:18 compute-0 sudo[76835]: pam_unix(sudo:session): session closed for user root

● session-29.scope - Session 29 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-29.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 11:31:18 UTC; 53min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.2M)
        CPU: 143ms
     CGroup: /user.slice/user-42477.slice/session-29.scope
             ├─76860 "sshd-session: ceph-admin [priv]"
             └─76863 "sshd-session: ceph-admin@notty"

Feb 02 11:31:18 compute-0 systemd[1]: Started Session 29 of User ceph-admin.
Feb 02 11:31:18 compute-0 sudo[76864]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chmod 644 /tmp/cephadm-4548a36b-7cdc-5e3e-a814-4e1571be1fae/var/lib/ceph/4548a36b-7cdc-5e3e-a814-4e1571be1fae/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b.new
Feb 02 11:31:18 compute-0 sudo[76864]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 11:31:18 compute-0 sudo[76864]: pam_unix(sudo:session): session closed for user root

● session-30.scope - Session 30 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-30.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 11:31:18 UTC; 53min ago
         IO: 0B read, 1016.0K written
      Tasks: 2
     Memory: 2.2M (peak: 3.5M)
        CPU: 161ms
     CGroup: /user.slice/user-42477.slice/session-30.scope
             ├─76889 "sshd-session: ceph-admin [priv]"
             └─76892 "sshd-session: ceph-admin@notty"

Feb 02 11:31:18 compute-0 systemd[1]: Started Session 30 of User ceph-admin.

● session-31.scope - Session 31 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-31.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 11:31:20 UTC; 53min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 5.7M)
        CPU: 175ms
     CGroup: /user.slice/user-42477.slice/session-31.scope
             ├─76916 "sshd-session: ceph-admin [priv]"
             └─76919 "sshd-session: ceph-admin@notty"

Feb 02 11:31:20 compute-0 systemd[1]: Started Session 31 of User ceph-admin.
Feb 02 11:31:20 compute-0 sudo[76920]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mv -Z /tmp/cephadm-4548a36b-7cdc-5e3e-a814-4e1571be1fae/var/lib/ceph/4548a36b-7cdc-5e3e-a814-4e1571be1fae/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b.new /var/lib/ceph/4548a36b-7cdc-5e3e-a814-4e1571be1fae/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b
Feb 02 11:31:20 compute-0 sudo[76920]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 11:31:20 compute-0 sudo[76920]: pam_unix(sudo:session): session closed for user root

● session-32.scope - Session 32 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-32.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 11:31:20 UTC; 53min ago
         IO: 1.8M read, 176.1M written
      Tasks: 2
     Memory: 6.7M (peak: 55.5M)
        CPU: 3min 5.786s
     CGroup: /user.slice/user-42477.slice/session-32.scope
             ├─76945 "sshd-session: ceph-admin [priv]"
             └─76948 "sshd-session: ceph-admin@notty"

Feb 02 12:24:10 compute-0 podman[288152]: 2026-02-02 12:24:10.01373312 +0000 UTC m=+0.023877447 image pull 524f3da276461682bec27427fb8a63b5139c40ad4185939aede197474a6817b3 quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86
Feb 02 12:24:10 compute-0 podman[288152]: 2026-02-02 12:24:10.130612926 +0000 UTC m=+0.140757253 container init 9f68c28eb02450c3fcbb3c231eb7d9458d1ce4bca9b4288ef57689472ceb0ddf (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=modest_banach, org.label-schema.schema-version=1.0, CEPH_REF=tentacle, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.build-date=20251030)
Feb 02 12:24:10 compute-0 podman[288152]: 2026-02-02 12:24:10.138662451 +0000 UTC m=+0.148806748 container start 9f68c28eb02450c3fcbb3c231eb7d9458d1ce4bca9b4288ef57689472ceb0ddf (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=modest_banach, org.label-schema.build-date=20251030, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=tentacle, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, ceph=True, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3)
Feb 02 12:24:10 compute-0 podman[288152]: 2026-02-02 12:24:10.143490899 +0000 UTC m=+0.153635216 container attach 9f68c28eb02450c3fcbb3c231eb7d9458d1ce4bca9b4288ef57689472ceb0ddf (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=modest_banach, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=tentacle, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20251030, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489)
Feb 02 12:24:11 compute-0 podman[288152]: 2026-02-02 12:24:11.036501625 +0000 UTC m=+1.046645942 container died 9f68c28eb02450c3fcbb3c231eb7d9458d1ce4bca9b4288ef57689472ceb0ddf (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=modest_banach, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.41.3, org.label-schema.build-date=20251030, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=tentacle, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0)
Feb 02 12:24:11 compute-0 podman[288152]: 2026-02-02 12:24:11.089633501 +0000 UTC m=+1.099777798 container remove 9f68c28eb02450c3fcbb3c231eb7d9458d1ce4bca9b4288ef57689472ceb0ddf (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=modest_banach, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=tentacle, org.label-schema.build-date=20251030, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489)
Unit apparmor.service could not be found.
Unit apt-daily.service could not be found.
Feb 02 12:24:11 compute-0 sudo[288052]: pam_unix(sudo:session): session closed for user root
Feb 02 12:24:11 compute-0 sudo[288270]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Feb 02 12:24:11 compute-0 sudo[288270]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 12:24:11 compute-0 sudo[288270]: pam_unix(sudo:session): session closed for user root

● session-54.scope - Session 54 of User zuul
     Loaded: loaded (/run/systemd/transient/session-54.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 12:24:05 UTC; 45s ago
         IO: 60.5M read, 77.5M written
      Tasks: 31
     Memory: 535.8M (peak: 590.3M)
        CPU: 2min 13.876s
     CGroup: /user.slice/user-1000.slice/session-54.scope
             ├─287512 "sshd-session: zuul [priv]"
             ├─287564 "sshd-session: zuul@notty"
             ├─287565 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             ├─287589 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             ├─293711 timeout 15s turbostat --debug sleep 10
             ├─294058 timeout 300s systemctl status --all
             ├─294059 systemctl status --all
             ├─294062 timeout 300s ceph osd blocklist ls --format json-pretty
             ├─294063 /usr/bin/python3 -s /usr/bin/ceph osd blocklist ls --format json-pretty
             ├─294083 timeout 300s semanage interface -l
             └─294084 /usr/bin/python3 -EsI /usr/sbin/semanage interface -l

Feb 02 12:24:05 compute-0 systemd[1]: Started Session 54 of User zuul.
Feb 02 12:24:06 compute-0 sudo[287565]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Feb 02 12:24:06 compute-0 sudo[287565]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Feb 02 12:24:12 compute-0 ovs-vsctl[288324]: ovs|00001|db_ctl_base|ERR|no key "dpdk-init" in Open_vSwitch record "." column other_config
Feb 02 12:24:43 compute-0 ovs-appctl[292799]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Feb 02 12:24:43 compute-0 ovs-appctl[292804]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

● auditd.service - Security Auditing Service
     Loaded: loaded (/usr/lib/systemd/system/auditd.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
       Docs: man:auditd(8)
             https://github.com/linux-audit/audit-documentation
   Main PID: 704 (auditd)
         IO: 0B read, 23.9M written
      Tasks: 4 (limit: 48560)
     Memory: 16.6M (peak: 17.1M)
        CPU: 4.961s
     CGroup: /system.slice/auditd.service
             ├─704 /sbin/auditd
             └─706 /usr/sbin/sedispatch

Feb 02 10:52:39 localhost augenrules[724]: failure 1
Feb 02 10:52:39 localhost augenrules[724]: pid 704
Feb 02 10:52:39 localhost augenrules[724]: rate_limit 0
Feb 02 10:52:39 localhost augenrules[724]: backlog_limit 8192
Unit auto-cpufreq.service could not be found.
Unit autofs.service could not be found.
Feb 02 10:52:39 localhost augenrules[724]: lost 0
Feb 02 10:52:39 localhost augenrules[724]: backlog 3
Feb 02 10:52:39 localhost augenrules[724]: backlog_wait_time 60000
Feb 02 10:52:39 localhost augenrules[724]: backlog_wait_time_actual 0
Feb 02 10:52:39 localhost systemd[1]: Started Security Auditing Service.
Feb 02 11:44:28 compute-0 auditd[704]: Audit daemon rotating log files

○ auth-rpcgss-module.service - Kernel Module supporting RPCSEC_GSS
     Loaded: loaded (/usr/lib/systemd/system/auth-rpcgss-module.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 10:52:38 UTC; 1h 32min ago

○ blk-availability.service - Availability of block devices
     Loaded: loaded (/usr/lib/systemd/system/blk-availability.service; disabled; preset: disabled)
     Active: inactive (dead)

● ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@crash.compute-0.service - Ceph crash.compute-0 for 4548a36b-7cdc-5e3e-a814-4e1571be1fae
     Loaded: loaded (/etc/systemd/system/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 11:31:31 UTC; 53min ago
   Main PID: 80408 (conmon)
         IO: 0B read, 172.0K written
      Tasks: 3 (limit: 48560)
     Memory: 7.7M (peak: 24.4M)
        CPU: 547ms
     CGroup: /system.slice/system-ceph\x2d4548a36b\x2d7cdc\x2d5e3e\x2da814\x2d4e1571be1fae.slice/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@crash.compute-0.service
             ├─libpod-payload-61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0
             │ ├─80410 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ └─80412 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             └─runtime
               └─80408 /usr/bin/conmon --api-version 1 -c 61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0 -u 61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0/userdata -p /run/containers/storage/overlay-containers/61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0

Feb 02 11:31:31 compute-0 systemd[1]: Started Ceph crash.compute-0 for 4548a36b-7cdc-5e3e-a814-4e1571be1fae.
Feb 02 11:31:31 compute-0 ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-crash-compute-0[80408]: INFO:ceph-crash:pinging cluster to exercise our key
Feb 02 11:31:31 compute-0 ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-crash-compute-0[80408]: 2026-02-02T11:31:31.404+0000 7fed15abd640 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory
Feb 02 11:31:31 compute-0 ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-crash-compute-0[80408]: 2026-02-02T11:31:31.404+0000 7fed15abd640 -1 AuthRegistry(0x7fed10052930) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx
Feb 02 11:31:31 compute-0 ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-crash-compute-0[80408]: 2026-02-02T11:31:31.408+0000 7fed15abd640 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory
Feb 02 11:31:31 compute-0 ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-crash-compute-0[80408]: 2026-02-02T11:31:31.408+0000 7fed15abd640 -1 AuthRegistry(0x7fed15abbfe0) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx
Feb 02 11:31:31 compute-0 ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-crash-compute-0[80408]: 2026-02-02T11:31:31.409+0000 7fed0f7fe640 -1 monclient(hunting): handle_auth_bad_method server allowed_methods [2] but i only support [1]
Feb 02 11:31:31 compute-0 ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-crash-compute-0[80408]: 2026-02-02T11:31:31.410+0000 7fed15abd640 -1 monclient: authenticate NOTE: no keyring found; disabled cephx authentication
Feb 02 11:31:31 compute-0 ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-crash-compute-0[80408]: [errno 13] RADOS permission denied (error connecting to the cluster)
Feb 02 11:31:31 compute-0 ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-crash-compute-0[80408]: INFO:ceph-crash:monitoring path /var/lib/ceph/crash, delay 600s

● ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mds.cephfs.compute-0.mldrue.service - Ceph mds.cephfs.compute-0.mldrue for 4548a36b-7cdc-5e3e-a814-4e1571be1fae
     Loaded: loaded (/etc/systemd/system/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 11:32:43 UTC; 52min ago
   Main PID: 95494 (conmon)
         IO: 0B read, 201.5K written
      Tasks: 31 (limit: 48560)
     Memory: 26.5M (peak: 27.3M)
        CPU: 5.221s
     CGroup: /system.slice/system-ceph\x2d4548a36b\x2d7cdc\x2d5e3e\x2da814\x2d4e1571be1fae.slice/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mds.cephfs.compute-0.mldrue.service
             ├─libpod-payload-2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409
             │ ├─95496 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.mldrue -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─95505 /usr/bin/ceph-mds -n mds.cephfs.compute-0.mldrue -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─95494 /usr/bin/conmon --api-version 1 -c 2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409 -u 2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409/userdata -p /run/containers/storage/overlay-containers/2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-mds-cephfs-compute-0-mldrue --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mds.cephfs.compute-0.mldrue.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409

Feb 02 12:24:15 compute-0 ceph-mds[95505]: mds.cephfs.compute-0.mldrue asok_command: dump loads {prefix=dump loads} (starting...)
Feb 02 12:24:15 compute-0 ceph-mds[95505]: mds.cephfs.compute-0.mldrue asok_command: dump tree {prefix=dump tree,root=/} (starting...)
Feb 02 12:24:15 compute-0 ceph-mds[95505]: mds.cephfs.compute-0.mldrue asok_command: dump_blocked_ops {prefix=dump_blocked_ops} (starting...)
Feb 02 12:24:15 compute-0 ceph-mds[95505]: mds.cephfs.compute-0.mldrue asok_command: dump_historic_ops {prefix=dump_historic_ops} (starting...)
Feb 02 12:24:15 compute-0 ceph-mds[95505]: mds.cephfs.compute-0.mldrue asok_command: dump_historic_ops_by_duration {prefix=dump_historic_ops_by_duration} (starting...)
Feb 02 12:24:15 compute-0 ceph-mds[95505]: mds.cephfs.compute-0.mldrue asok_command: dump_ops_in_flight {prefix=dump_ops_in_flight} (starting...)
Feb 02 12:24:15 compute-0 ceph-mds[95505]: mds.cephfs.compute-0.mldrue asok_command: get subtrees {prefix=get subtrees} (starting...)
Feb 02 12:24:16 compute-0 ceph-mds[95505]: mds.cephfs.compute-0.mldrue asok_command: ops {prefix=ops} (starting...)
Feb 02 12:24:16 compute-0 ceph-mds[95505]: mds.cephfs.compute-0.mldrue asok_command: session ls {prefix=session ls} (starting...)
Feb 02 12:24:17 compute-0 ceph-mds[95505]: mds.cephfs.compute-0.mldrue asok_command: status {prefix=status} (starting...)

● ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mgr.compute-0.twcemg.service - Ceph mgr.compute-0.twcemg for 4548a36b-7cdc-5e3e-a814-4e1571be1fae
     Loaded: loaded (/etc/systemd/system/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 11:30:53 UTC; 53min ago
   Main PID: 75554 (conmon)
         IO: 0B read, 2.9M written
      Tasks: 144 (limit: 48560)
     Memory: 530.1M (peak: 531.3M)
        CPU: 56.361s
     CGroup: /system.slice/system-ceph\x2d4548a36b\x2d7cdc\x2d5e3e\x2da814\x2d4e1571be1fae.slice/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mgr.compute-0.twcemg.service
             ├─libpod-payload-e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3
             │ ├─75556 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.twcemg -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─75558 /usr/bin/ceph-mgr -n mgr.compute-0.twcemg -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─75554 /usr/bin/conmon --api-version 1 -c e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3 -u e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3/userdata -p /run/containers/storage/overlay-containers/e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-mgr-compute-0-twcemg --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mgr.compute-0.twcemg.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3

Feb 02 12:24:42 compute-0 ceph-mgr[75558]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Feb 02 12:24:42 compute-0 ceph-mgr[75558]: log_channel(cluster) log [DBG] : pgmap v2099: 305 pgs: 305 active+clean; 271 MiB data, 633 MiB used, 59 GiB / 60 GiB avail
Feb 02 12:24:43 compute-0 ceph-mgr[75558]: log_channel(audit) log [DBG] : from='client.19606 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""]}]: dispatch
Feb 02 12:24:44 compute-0 ceph-mgr[75558]: log_channel(audit) log [DBG] : from='client.19610 -' entity='client.admin' cmd=[{"prefix": "pg stat", "target": ["mon-mgr", ""]}]: dispatch
Feb 02 12:24:44 compute-0 ceph-mgr[75558]: log_channel(cluster) log [DBG] : pgmap v2100: 305 pgs: 305 active+clean; 271 MiB data, 633 MiB used, 59 GiB / 60 GiB avail
Feb 02 12:24:46 compute-0 ceph-mgr[75558]: log_channel(audit) log [DBG] : from='client.19624 -' entity='client.admin' cmd=[{"prefix": "device ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Feb 02 12:24:46 compute-0 ceph-mgr[75558]: log_channel(cluster) log [DBG] : pgmap v2101: 305 pgs: 305 active+clean; 271 MiB data, 633 MiB used, 59 GiB / 60 GiB avail
Feb 02 12:24:48 compute-0 ceph-mgr[75558]: log_channel(cluster) log [DBG] : pgmap v2102: 305 pgs: 305 active+clean; 271 MiB data, 633 MiB used, 59 GiB / 60 GiB avail
Feb 02 12:24:49 compute-0 ceph-mgr[75558]: log_channel(audit) log [DBG] : from='client.19634 -' entity='client.admin' cmd=[{"prefix": "fs status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Feb 02 12:24:50 compute-0 ceph-mgr[75558]: log_channel(audit) log [DBG] : from='client.19640 -' entity='client.admin' cmd=[{"prefix": "osd blocked-by", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch

● ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mon.compute-0.service - Ceph mon.compute-0 for 4548a36b-7cdc-5e3e-a814-4e1571be1fae
     Loaded: loaded (/etc/systemd/system/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 11:30:51 UTC; 53min ago
   Main PID: 75267 (conmon)
         IO: 2.0M read, 414.2M written
      Tasks: 27 (limit: 48560)
     Memory: 108.5M (peak: 119.1M)
        CPU: 45.351s
     CGroup: /system.slice/system-ceph\x2d4548a36b\x2d7cdc\x2d5e3e\x2da814\x2d4e1571be1fae.slice/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mon.compute-0.service
             ├─libpod-payload-fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04
             │ ├─75269 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─75271 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             └─runtime
               └─75267 /usr/bin/conmon --api-version 1 -c fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04 -u fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04/userdata -p /run/containers/storage/overlay-containers/fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04

Feb 02 12:24:48 compute-0 ceph-mon[75271]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "fs ls", "format": "json-pretty"} v 0)
Feb 02 12:24:48 compute-0 ceph-mon[75271]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3465098829' entity='client.admin' cmd={"prefix": "fs ls", "format": "json-pretty"} : dispatch
Feb 02 12:24:49 compute-0 ceph-mon[75271]: from='client.? 192.168.122.100:0/3465098829' entity='client.admin' cmd={"prefix": "fs ls", "format": "json-pretty"} : dispatch
Feb 02 12:24:49 compute-0 ceph-mon[75271]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mds stat", "format": "json-pretty"} v 0)
Feb 02 12:24:49 compute-0 ceph-mon[75271]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3703384699' entity='client.admin' cmd={"prefix": "mds stat", "format": "json-pretty"} : dispatch
Feb 02 12:24:50 compute-0 ceph-mon[75271]: pgmap v2102: 305 pgs: 305 active+clean; 271 MiB data, 633 MiB used, 59 GiB / 60 GiB avail
Feb 02 12:24:50 compute-0 ceph-mon[75271]: from='client.19634 -' entity='client.admin' cmd=[{"prefix": "fs status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Feb 02 12:24:50 compute-0 ceph-mon[75271]: from='client.? 192.168.122.100:0/3703384699' entity='client.admin' cmd={"prefix": "mds stat", "format": "json-pretty"} : dispatch
Feb 02 12:24:50 compute-0 ceph-mon[75271]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json-pretty"} v 0)
Feb 02 12:24:50 compute-0 ceph-mon[75271]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/462666043' entity='client.admin' cmd={"prefix": "mon dump", "format": "json-pretty"} : dispatch

● ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.0.service - Ceph osd.0 for 4548a36b-7cdc-5e3e-a814-4e1571be1fae
     Loaded: loaded (/etc/systemd/system/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 11:31:53 UTC; 52min ago
   Main PID: 86140 (conmon)
         IO: 566.1M read, 8.6G written
      Tasks: 61 (limit: 48560)
     Memory: 969.2M (peak: 1.3G)
        CPU: 49.010s
     CGroup: /system.slice/system-ceph\x2d4548a36b\x2d7cdc\x2d5e3e\x2da814\x2d4e1571be1fae.slice/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.0.service
             ├─libpod-payload-409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962
             │ ├─86142 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ └─86144 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             └─runtime
               └─86140 /usr/bin/conmon --api-version 1 -c 409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962 -u 409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962/userdata -p /run/containers/storage/overlay-containers/409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962

Feb 02 12:24:30 compute-0 ceph-osd[86144]: do_command 'counter schema' '{prefix=counter schema}' result is 0 bytes
Feb 02 12:24:30 compute-0 ceph-osd[86144]: monclient: tick
Feb 02 12:24:30 compute-0 ceph-osd[86144]: monclient: _check_auth_tickets
Feb 02 12:24:30 compute-0 ceph-osd[86144]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-02-02T12:23:58.761778+0000)
Feb 02 12:24:30 compute-0 ceph-osd[86144]: prioritycache tune_memory target: 4294967296 mapped: 219725824 unmapped: 47497216 heap: 267223040 old mem: 2845415832 new mem: 2845415832
Feb 02 12:24:30 compute-0 ceph-osd[86144]: monclient: tick
Feb 02 12:24:30 compute-0 ceph-osd[86144]: monclient: _check_auth_tickets
Feb 02 12:24:30 compute-0 ceph-osd[86144]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-02-02T12:23:59.761939+0000)
Feb 02 12:24:30 compute-0 ceph-osd[86144]: prioritycache tune_memory target: 4294967296 mapped: 219537408 unmapped: 47685632 heap: 267223040 old mem: 2845415832 new mem: 2845415832
Feb 02 12:24:30 compute-0 ceph-osd[86144]: do_command 'log dump' '{prefix=log dump}'

● ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.1.service - Ceph osd.1 for 4548a36b-7cdc-5e3e-a814-4e1571be1fae
     Loaded: loaded (/etc/systemd/system/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 11:31:57 UTC; 52min ago
   Main PID: 87188 (conmon)
         IO: 567.2M read, 8.2G written
      Tasks: 61 (limit: 48560)
     Memory: 889.9M (peak: 1.2G)
        CPU: 48.500s
     CGroup: /system.slice/system-ceph\x2d4548a36b\x2d7cdc\x2d5e3e\x2da814\x2d4e1571be1fae.slice/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.1.service
             ├─libpod-payload-8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0
             │ ├─87190 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ └─87192 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             └─runtime
               └─87188 /usr/bin/conmon --api-version 1 -c 8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0 -u 8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0/userdata -p /run/containers/storage/overlay-containers/8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0

Feb 02 12:24:25 compute-0 ceph-osd[87192]: prioritycache tune_memory target: 4294967296 mapped: 214507520 unmapped: 30171136 heap: 244678656 old mem: 2845415832 new mem: 2845415832
Feb 02 12:24:25 compute-0 ceph-osd[87192]: osd.1 502 heartbeat osd_stat(store_statfs(0x4f3aae000/0x0/0x4ffc00000, data 0x3ab1190/0x3d5e000, compress 0x0/0x0/0x0, omap 0x83b39, meta 0x836c4c7), peers [0,2] op hist [])
Feb 02 12:24:25 compute-0 ceph-osd[87192]: monclient: tick
Feb 02 12:24:25 compute-0 ceph-osd[87192]: monclient: _check_auth_tickets
Feb 02 12:24:25 compute-0 ceph-osd[87192]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-02-02T12:23:54.418607+0000)
Feb 02 12:24:25 compute-0 ceph-osd[87192]: prioritycache tune_memory target: 4294967296 mapped: 214507520 unmapped: 30171136 heap: 244678656 old mem: 2845415832 new mem: 2845415832
Feb 02 12:24:25 compute-0 ceph-osd[87192]: monclient: tick
Feb 02 12:24:25 compute-0 ceph-osd[87192]: monclient: _check_auth_tickets
Feb 02 12:24:25 compute-0 ceph-osd[87192]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-02-02T12:23:55.418787+0000)
Feb 02 12:24:25 compute-0 ceph-osd[87192]: do_command 'log dump' '{prefix=log dump}'

● ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.2.service - Ceph osd.2 for 4548a36b-7cdc-5e3e-a814-4e1571be1fae
     Loaded: loaded (/etc/systemd/system/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 11:32:01 UTC; 52min ago
   Main PID: 88232 (conmon)
         IO: 552.9M read, 7.7G written
      Tasks: 61 (limit: 48560)
     Memory: 836.0M (peak: 1.1G)
        CPU: 41.817s
     CGroup: /system.slice/system-ceph\x2d4548a36b\x2d7cdc\x2d5e3e\x2da814\x2d4e1571be1fae.slice/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.2.service
             ├─libpod-payload-599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd
             │ ├─88234 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ └─88236 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             └─runtime
               └─88232 /usr/bin/conmon --api-version 1 -c 599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd -u 599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd/userdata -p /run/containers/storage/overlay-containers/599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd

Feb 02 12:24:21 compute-0 ceph-osd[88236]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-02-02T12:23:49.265871+0000)
Feb 02 12:24:21 compute-0 ceph-osd[88236]: prioritycache tune_memory target: 4294967296 mapped: 209534976 unmapped: 33587200 heap: 243122176 old mem: 2845415832 new mem: 2845415832
Feb 02 12:24:21 compute-0 ceph-osd[88236]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Feb 02 12:24:21 compute-0 ceph-osd[88236]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Feb 02 12:24:21 compute-0 ceph-osd[88236]: bluestore.MempoolThread _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 3447375 data_alloc: 218103808 data_used: 23205187
Feb 02 12:24:21 compute-0 ceph-osd[88236]: monclient: tick
Feb 02 12:24:21 compute-0 ceph-osd[88236]: monclient: _check_auth_tickets
Feb 02 12:24:21 compute-0 ceph-osd[88236]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-02-02T12:23:50.266022+0000)
Feb 02 12:24:21 compute-0 ceph-osd[88236]: prioritycache tune_memory target: 4294967296 mapped: 209330176 unmapped: 33792000 heap: 243122176 old mem: 2845415832 new mem: 2845415832
Feb 02 12:24:21 compute-0 ceph-osd[88236]: do_command 'log dump' '{prefix=log dump}'

● ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@rgw.rgw.compute-0.ctqttb.service - Ceph rgw.rgw.compute-0.ctqttb for 4548a36b-7cdc-5e3e-a814-4e1571be1fae
     Loaded: loaded (/etc/systemd/system/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 11:32:42 UTC; 52min ago
   Main PID: 94939 (conmon)
         IO: 0B read, 175.5K written
      Tasks: 614 (limit: 48560)
     Memory: 106.7M (peak: 107.5M)
        CPU: 19.836s
     CGroup: /system.slice/system-ceph\x2d4548a36b\x2d7cdc\x2d5e3e\x2da814\x2d4e1571be1fae.slice/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@rgw.rgw.compute-0.ctqttb.service
             ├─libpod-payload-e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90
             │ ├─94941 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.ctqttb -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
             │ └─94979 /usr/bin/radosgw -n client.rgw.rgw.compute-0.ctqttb -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
             └─runtime
               └─94939 /usr/bin/conmon --api-version 1 -c e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90 -u e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90/userdata -p /run/containers/storage/overlay-containers/e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-rgw-rgw-compute-0-ctqttb --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@rgw.rgw.compute-0.ctqttb.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90

Feb 02 11:32:42 compute-0 bash[94923]: e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90
Feb 02 11:32:42 compute-0 systemd[1]: Started Ceph rgw.rgw.compute-0.ctqttb for 4548a36b-7cdc-5e3e-a814-4e1571be1fae.
Feb 02 11:32:53 compute-0 radosgw[94979]: v1 topic migration: starting v1 topic migration..
Feb 02 11:32:53 compute-0 radosgw[94979]: v1 topic migration: finished v1 topic migration
Feb 02 11:32:53 compute-0 radosgw[94979]: framework: beast
Feb 02 11:32:53 compute-0 radosgw[94979]: framework conf key: ssl_certificate, val: config://rgw/cert/$realm/$zone.crt
Feb 02 11:32:53 compute-0 radosgw[94979]: framework conf key: ssl_private_key, val: config://rgw/cert/$realm/$zone.key
Feb 02 11:32:53 compute-0 radosgw[94979]: starting handler: beast
Feb 02 11:32:53 compute-0 radosgw[94979]: set uid:gid to 167:167 (ceph:ceph)
Feb 02 11:32:53 compute-0 radosgw[94979]: mgrc service_daemon_register rgw.14256 metadata {arch=x86_64,ceph_release=tentacle,ceph_version=ceph version 20.2.0 (69f84cc2651aa259a15bc192ddaabd3baba07489) tentacle (stable - RelWithDebInfo),ceph_version_short=20.2.0,container_hostname=compute-0,container_image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86,cpu=AMD EPYC-Rome Processor,distro=centos,distro_description=CentOS Stream 9,distro_version=9,frontend_config#0=beast endpoint=192.168.122.100:8082,frontend_type#0=beast,hostname=compute-0,id=rgw.compute-0.ctqttb,kernel_description=#1 SMP PREEMPT_DYNAMIC Thu Jan 22 12:30:22 UTC 2026,kernel_version=5.14.0-665.el9.x86_64,mem_swap_kb=1048572,mem_total_kb=7864300,num_handles=1,os=Linux,pid=2,realm_id=,realm_name=,zone_id=5146e00b-0a71-4eb0-9811-9c57cf47df3e,zone_name=default,zonegroup_id=cea5580c-4145-447d-b53f-2a852b395f37,zonegroup_name=default}

● ceph-osd-losetup-0.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-0.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 11:30:04 UTC; 54min ago
   Main PID: 72616 (code=exited, status=0/SUCCESS)
        CPU: 21ms

Feb 02 11:30:04 compute-0 systemd[1]: Starting Ceph OSD losetup...
Feb 02 11:30:04 compute-0 bash[72617]: /dev/loop3: [64513]:4194935 (/var/lib/ceph-osd-0.img)
Feb 02 11:30:04 compute-0 systemd[1]: Finished Ceph OSD losetup.

● ceph-osd-losetup-1.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-1.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 11:30:08 UTC; 54min ago
   Main PID: 72985 (code=exited, status=0/SUCCESS)
        CPU: 23ms

Feb 02 11:30:08 compute-0 systemd[1]: Starting Ceph OSD losetup...
Feb 02 11:30:08 compute-0 bash[72986]: /dev/loop4: [64513]:4355818 (/var/lib/ceph-osd-1.img)
Feb 02 11:30:08 compute-0 systemd[1]: Finished Ceph OSD losetup.

● ceph-osd-losetup-2.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-2.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 11:30:12 UTC; 54min ago
   Main PID: 73354 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Feb 02 11:30:12 compute-0 systemd[1]: Starting Ceph OSD losetup...
Feb 02 11:30:12 compute-0 bash[73355]: /dev/loop5: [64513]:4355819 (/var/lib/ceph-osd-2.img)
Feb 02 11:30:12 compute-0 systemd[1]: Finished Ceph OSD losetup.

● chronyd.service - NTP client/server
     Loaded: loaded (/usr/lib/systemd/system/chronyd.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 11:28:11 UTC; 56min ago
       Docs: man:chronyd(8)
             man:chrony.conf(5)
   Main PID: 58660 (chronyd)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 1.0M (peak: 1.9M)
        CPU: 70ms
     CGroup: /system.slice/chronyd.service
             └─58660 /usr/sbin/chronyd -F 2

Feb 02 11:28:11 compute-0 systemd[1]: Starting NTP client/server...
Feb 02 11:28:11 compute-0 chronyd[58660]: chronyd version 4.8 starting (+CMDMON +REFCLOCK +RTC +PRIVDROP +SCFILTER +SIGND +NTS +SECHASH +IPV6 +DEBUG)
Feb 02 11:28:11 compute-0 chronyd[58660]: Frequency -26.447 +/- 0.078 ppm read from /var/lib/chrony/drift
Feb 02 11:28:11 compute-0 chronyd[58660]: Loaded seccomp filter (level 2)
Feb 02 11:28:11 compute-0 systemd[1]: Started NTP client/server.
Feb 02 11:30:21 compute-0 chronyd[58660]: Selected source 149.56.19.163 (pool.ntp.org)

● cloud-config.service - Cloud-init: Config Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 10:52:44 UTC; 1h 32min ago
   Main PID: 1006 (code=exited, status=0/SUCCESS)
        CPU: 363ms

Feb 02 10:52:44 np0005604943.novalocal systemd[1]: Starting Cloud-init: Config Stage...
Feb 02 10:52:44 np0005604943.novalocal cloud-init[1186]: Cloud-init v. 24.4-8.el9 running 'modules:config' at Mon, 02 Feb 2026 10:52:44 +0000. Up 9.80 seconds.
Feb 02 10:52:44 np0005604943.novalocal systemd[1]: Finished Cloud-init: Config Stage.

● cloud-final.service - Cloud-init: Final Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-final.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 10:52:44 UTC; 1h 32min ago
   Main PID: 1229 (code=exited, status=0/SUCCESS)
        CPU: 423ms

Feb 02 10:52:44 np0005604943.novalocal systemd[1]: Starting Cloud-init: Final Stage...
Feb 02 10:52:44 np0005604943.novalocal cloud-init[1339]: Cloud-init v. 24.4-8.el9 running 'modules:final' at Mon, 02 Feb 2026 10:52:44 +0000. Up 10.16 seconds.
Feb 02 10:52:44 np0005604943.novalocal cloud-init[1347]: -----BEGIN SSH HOST KEY FINGERPRINTS-----
Feb 02 10:52:44 np0005604943.novalocal cloud-init[1354]: 3072 SHA256:0URjfKMrpish9/pJR3f9jALpXSET5bz/k7fQQ885suw root@np0005604943.novalocal (RSA)
Feb 02 10:52:44 np0005604943.novalocal cloud-init[1358]: -----END SSH HOST KEY FINGERPRINTS-----
Feb 02 10:52:44 np0005604943.novalocal cloud-init[1359]: #############################################################
Feb 02 10:52:44 np0005604943.novalocal cloud-init[1339]: Cloud-init v. 24.4-8.el9 finished at Mon, 02 Feb 2026 10:52:44 +0000. Datasource DataSourceConfigDrive [net,ver=2][source=/dev/sr0].  Up 10.33 seconds
Feb 02 10:52:44 np0005604943.novalocal systemd[1]: Finished Cloud-init: Final Stage.

● cloud-init-local.service - Cloud-init: Local Stage (pre-network)
     Loaded: loaded (/usr/lib/systemd/system/cloud-init-local.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 10:52:41 UTC; 1h 32min ago
   Main PID: 778 (code=exited, status=0/SUCCESS)
        CPU: 781ms

Feb 02 10:52:39 localhost systemd[1]: Starting Cloud-init: Local Stage (pre-network)...
Feb 02 10:52:40 localhost cloud-init[841]: Cloud-init v. 24.4-8.el9 running 'init-local' at Mon, 02 Feb 2026 10:52:40 +0000. Up 6.07 seconds.
Feb 02 10:52:41 np0005604943.novalocal systemd[1]: Finished Cloud-init: Local Stage (pre-network).

● cloud-init.service - Cloud-init: Network Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 10:52:44 UTC; 1h 32min ago
   Main PID: 905 (code=exited, status=0/SUCCESS)
        CPU: 1.013s

Feb 02 10:52:43 np0005604943.novalocal cloud-init[924]: | oo oo.+o.+      |
Feb 02 10:52:43 np0005604943.novalocal cloud-init[924]: |...E.o..oo o     |
Feb 02 10:52:43 np0005604943.novalocal cloud-init[924]: |.o ++  .o.       |
Feb 02 10:52:43 np0005604943.novalocal cloud-init[924]: |o =.ooo.So .     |
Feb 02 10:52:43 np0005604943.novalocal cloud-init[924]: | o ooooo= o .    |
Feb 02 10:52:43 np0005604943.novalocal cloud-init[924]: |   o.+.. o o     |
Feb 02 10:52:43 np0005604943.novalocal cloud-init[924]: |    +     ..o    |
Feb 02 10:52:43 np0005604943.novalocal cloud-init[924]: |         oooo.   |
Feb 02 10:52:43 np0005604943.novalocal cloud-init[924]: +----[SHA256]-----+
Feb 02 10:52:44 np0005604943.novalocal systemd[1]: Finished Cloud-init: Network Stage.

○ cpupower.service - Configure CPU power related settings
     Loaded: loaded (/usr/lib/systemd/system/cpupower.service; disabled; preset: disabled)
     Active: inactive (dead)

● crond.service - Command Scheduler
     Loaded: loaded (/usr/lib/systemd/system/crond.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 10:52:44 UTC; 1h 32min ago
   Main PID: 1013 (crond)
         IO: 0B read, 8.0K written
      Tasks: 2 (limit: 48560)
     Memory: 1.5M (peak: 4.9M)
        CPU: 170ms
     CGroup: /system.slice/crond.service
             ├─1013 /usr/sbin/crond -n
             └─7490 /usr/sbin/anacron -s

Feb 02 11:01:01 np0005604943.novalocal anacron[7490]: Will run job `cron.weekly' in 70 min.
Feb 02 11:01:01 np0005604943.novalocal anacron[7490]: Will run job `cron.monthly' in 90 min.
Feb 02 11:01:01 np0005604943.novalocal anacron[7490]: Jobs will be executed sequentially
Feb 02 11:01:01 np0005604943.novalocal CROND[7478]: (root) CMDEND (run-parts /etc/cron.hourly)
Feb 02 11:51:01 compute-0 anacron[7490]: Job `cron.daily' started
Feb 02 11:51:01 compute-0 anacron[7490]: Job `cron.daily' terminated
Feb 02 12:01:01 compute-0 CROND[255634]: (root) CMD (run-parts /etc/cron.hourly)
Feb 02 12:01:01 compute-0 CROND[255633]: (root) CMDEND (run-parts /etc/cron.hourly)
Feb 02 12:11:01 compute-0 anacron[7490]: Job `cron.weekly' started
Feb 02 12:11:01 compute-0 anacron[7490]: Job `cron.weekly' terminated

● dbus-broker.service - D-Bus System Message Bus
     Loaded: loaded (/usr/lib/systemd/system/dbus-broker.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
TriggeredBy: ● dbus.socket
       Docs: man:dbus-broker-launch(1)
   Main PID: 768 (dbus-broker-lau)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48560)
     Memory: 2.7M (peak: 3.5M)
        CPU: 6.537s
     CGroup: /system.slice/dbus-broker.service
             ├─768 /usr/bin/dbus-broker-launch --scope system --audit
             └─775 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
Unit display-manager.service could not be found.

Feb 02 11:25:59 compute-0 dbus-broker-launch[768]: Noticed file-system modification, trigger reload.
Feb 02 11:25:59 compute-0 dbus-broker-launch[768]: Noticed file-system modification, trigger reload.
Feb 02 11:26:38 compute-0 dbus-broker-launch[775]: avc:  op=load_policy lsm=selinux seqno=7 res=1
Feb 02 11:26:46 compute-0 dbus-broker-launch[775]: avc:  op=load_policy lsm=selinux seqno=8 res=1
Feb 02 11:39:17 compute-0 dbus-broker-launch[775]: avc:  op=load_policy lsm=selinux seqno=9 res=1
Feb 02 11:42:39 compute-0 dbus-broker-launch[775]: avc:  op=load_policy lsm=selinux seqno=11 res=1
Feb 02 11:43:19 compute-0 dbus-broker-launch[768]: Noticed file-system modification, trigger reload.
Feb 02 11:43:19 compute-0 dbus-broker-launch[775]: avc:  op=load_policy lsm=selinux seqno=12 res=1
Feb 02 11:43:19 compute-0 dbus-broker-launch[768]: Noticed file-system modification, trigger reload.
Feb 02 11:44:27 compute-0 dbus-broker-launch[775]: avc:  op=load_policy lsm=selinux seqno=13 res=1

○ dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059-1f21a3646628ee04.service - /usr/bin/podman healthcheck run dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059
     Loaded: loaded (/run/systemd/transient/dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059-1f21a3646628ee04.service; transient)
  Transient: yes
     Active: inactive (dead) since Mon 2026-02-02 12:24:45 UTC; 5s ago
   Duration: 109ms
TriggeredBy: ● dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059-1f21a3646628ee04.timer
    Process: 293547 ExecStart=/usr/bin/podman healthcheck run dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059 (code=exited, status=0/SUCCESS)
   Main PID: 293547 (code=exited, status=0/SUCCESS)
        CPU: 89ms

Feb 02 12:24:45 compute-0 podman[293547]: 2026-02-02 12:24:45.080522176 +0000 UTC m=+0.085193833 container health_status dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ovn_controller, org.label-schema.license=GPLv2, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': 'f753062280449c359ff4c2dce751de4cb0e8717503110c3ea49626eae4ec2b5b-af67468a64521187a3d19b349040e64b8ebe04ca093912f980732f2b3e0883e2-af67468a64521187a3d19b349040e64b8ebe04ca093912f980732f2b3e0883e2-af67468a64521187a3d19b349040e64b8ebe04ca093912f980732f2b3e0883e2'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.vendor=CentOS, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260127, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=ovn_controller, 
io.buildah.version=1.41.3, managed_by=edpm_ansible)

○ dm-event.service - Device-mapper event daemon
     Loaded: loaded (/usr/lib/systemd/system/dm-event.service; static)
     Active: inactive (dead)
TriggeredBy: ● dm-event.socket
       Docs: man:dmeventd(8)

○ dnf-makecache.service - dnf makecache
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.service; static)
     Active: inactive (dead) since Mon 2026-02-02 11:12:19 UTC; 1h 12min ago
TriggeredBy: ● dnf-makecache.timer
   Main PID: 30937 (code=exited, status=0/SUCCESS)
        CPU: 24.262s

Feb 02 11:11:55 compute-0 dnf[30937]: NFV SIG OpenvSwitch                              22 MB/s | 461 kB     00:00
Feb 02 11:11:56 compute-0 dnf[30937]: repo-setup-centos-appstream                     132 MB/s |  26 MB     00:00
Feb 02 11:12:01 compute-0 dnf[30937]: repo-setup-centos-baseos                        117 MB/s | 8.9 MB     00:00
Feb 02 11:12:03 compute-0 dnf[30937]: repo-setup-centos-highavailability               22 MB/s | 744 kB     00:00
Feb 02 11:12:03 compute-0 dnf[30937]: repo-setup-centos-powertools                     74 MB/s | 7.6 MB     00:00
Feb 02 11:12:06 compute-0 dnf[30937]: Extra Packages for Enterprise Linux 9 - x86_64   42 MB/s |  20 MB     00:00
Feb 02 11:12:19 compute-0 dnf[30937]: Metadata cache created.
Feb 02 11:12:19 compute-0 systemd[1]: dnf-makecache.service: Deactivated successfully.
Feb 02 11:12:19 compute-0 systemd[1]: Finished dnf makecache.
Feb 02 11:12:19 compute-0 systemd[1]: dnf-makecache.service: Consumed 24.262s CPU time.

○ dracut-cmdline.service - dracut cmdline hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-cmdline.service; static)
     Active: inactive (dead) since Mon 2026-02-02 10:52:37 UTC; 1h 32min ago
   Duration: 1.619s
       Docs: man:dracut-cmdline.service(8)
             man:dracut.bootup(7)
   Main PID: 325 (code=exited, status=0/SUCCESS)
        CPU: 121ms

Feb 02 10:52:36 localhost systemd[1]: Starting dracut cmdline hook...
Feb 02 10:52:36 localhost dracut-cmdline[325]: dracut-9 dracut-057-102.git20250818.el9
Feb 02 10:52:36 localhost dracut-cmdline[325]: Using kernel command line parameters:    BOOT_IMAGE=(hd0,msdos1)/boot/vmlinuz-5.14.0-665.el9.x86_64 root=UUID=822f14ea-6e7e-41df-b0d8-fbe282d9ded8 ro console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=1G-2G:192M,2G-64G:256M,64G-:512M
Feb 02 10:52:36 localhost systemd[1]: Finished dracut cmdline hook.
Feb 02 10:52:37 localhost systemd[1]: dracut-cmdline.service: Deactivated successfully.
Feb 02 10:52:37 localhost systemd[1]: Stopped dracut cmdline hook.

○ dracut-initqueue.service - dracut initqueue hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-initqueue.service; static)
     Active: inactive (dead) since Mon 2026-02-02 10:52:37 UTC; 1h 32min ago
   Duration: 836ms
       Docs: man:dracut-initqueue.service(8)
             man:dracut.bootup(7)
   Main PID: 500 (code=exited, status=0/SUCCESS)
        CPU: 22ms

Feb 02 10:52:36 localhost systemd[1]: Starting dracut initqueue hook...
Feb 02 10:52:37 localhost systemd[1]: Finished dracut initqueue hook.
Feb 02 10:52:37 localhost systemd[1]: dracut-initqueue.service: Deactivated successfully.
Feb 02 10:52:37 localhost systemd[1]: Stopped dracut initqueue hook.

○ dracut-mount.service - dracut mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-mount.service; static)
     Active: inactive (dead) since Mon 2026-02-02 10:52:37 UTC; 1h 32min ago
   Duration: 140ms
       Docs: man:dracut-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 571 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Feb 02 10:52:37 localhost systemd[1]: Starting dracut mount hook...
Feb 02 10:52:37 localhost systemd[1]: Finished dracut mount hook.
Feb 02 10:52:37 localhost systemd[1]: dracut-mount.service: Deactivated successfully.
Feb 02 10:52:37 localhost systemd[1]: Stopped dracut mount hook.

○ dracut-pre-mount.service - dracut pre-mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-mount.service; static)
     Active: inactive (dead) since Mon 2026-02-02 10:52:37 UTC; 1h 32min ago
   Duration: 799ms
       Docs: man:dracut-pre-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 548 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Feb 02 10:52:37 localhost systemd[1]: Starting dracut pre-mount hook...
Feb 02 10:52:37 localhost systemd[1]: Finished dracut pre-mount hook.
Feb 02 10:52:37 localhost systemd[1]: dracut-pre-mount.service: Deactivated successfully.
Feb 02 10:52:37 localhost systemd[1]: Stopped dracut pre-mount hook.

○ dracut-pre-pivot.service - dracut pre-pivot and cleanup hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-pivot.service; static)
     Active: inactive (dead) since Mon 2026-02-02 10:52:37 UTC; 1h 32min ago
   Duration: 41ms
       Docs: man:dracut-pre-pivot.service(8)
             man:dracut.bootup(7)
   Main PID: 576 (code=exited, status=0/SUCCESS)
        CPU: 72ms

Feb 02 10:52:37 localhost systemd[1]: Starting dracut pre-pivot and cleanup hook...
Feb 02 10:52:37 localhost systemd[1]: Finished dracut pre-pivot and cleanup hook.
Feb 02 10:52:37 localhost systemd[1]: dracut-pre-pivot.service: Deactivated successfully.
Feb 02 10:52:37 localhost systemd[1]: Stopped dracut pre-pivot and cleanup hook.

○ dracut-pre-trigger.service - dracut pre-trigger hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-trigger.service; static)
     Active: inactive (dead) since Mon 2026-02-02 10:52:37 UTC; 1h 32min ago
   Duration: 1.295s
       Docs: man:dracut-pre-trigger.service(8)
             man:dracut.bootup(7)
   Main PID: 464 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Feb 02 10:52:36 localhost systemd[1]: Starting dracut pre-trigger hook...
Feb 02 10:52:36 localhost systemd[1]: Finished dracut pre-trigger hook.
Feb 02 10:52:37 localhost systemd[1]: dracut-pre-trigger.service: Deactivated successfully.
Feb 02 10:52:37 localhost systemd[1]: Stopped dracut pre-trigger hook.

○ dracut-pre-udev.service - dracut pre-udev hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-udev.service; static)
     Active: inactive (dead) since Mon 2026-02-02 10:52:37 UTC; 1h 32min ago
   Duration: 1.375s
       Docs: man:dracut-pre-udev.service(8)
             man:dracut.bootup(7)
   Main PID: 414 (code=exited, status=0/SUCCESS)
        CPU: 235ms

Feb 02 10:52:36 localhost systemd[1]: Starting dracut pre-udev hook...
Feb 02 10:52:36 localhost rpc.statd[441]: Version 2.5.4 starting
Feb 02 10:52:36 localhost rpc.statd[441]: Initializing NSM state
Feb 02 10:52:36 localhost rpc.idmapd[446]: Setting log level to 0
Feb 02 10:52:36 localhost systemd[1]: Finished dracut pre-udev hook.
Feb 02 10:52:37 localhost rpc.idmapd[446]: exiting on signal 15
Feb 02 10:52:37 localhost systemd[1]: dracut-pre-udev.service: Deactivated successfully.
Feb 02 10:52:37 localhost systemd[1]: Stopped dracut pre-udev hook.

○ dracut-shutdown-onfailure.service - Service executing upon dracut-shutdown failure to perform cleanup
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown-onfailure.service; static)
     Active: inactive (dead)
       Docs: man:dracut-shutdown.service(8)

● dracut-shutdown.service - Restore /run/initramfs on shutdown
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown.service; static)
     Active: active (exited) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
       Docs: man:dracut-shutdown.service(8)
   Main PID: 779 (code=exited, status=0/SUCCESS)
        CPU: 4ms

Feb 02 10:52:39 localhost systemd[1]: Starting Restore /run/initramfs on shutdown...
Feb 02 10:52:39 localhost systemd[1]: Finished Restore /run/initramfs on shutdown.

● edpm-container-shutdown.service - EDPM Container Shutdown
     Loaded: loaded (/etc/systemd/system/edpm-container-shutdown.service; enabled; preset: enabled)
     Active: active (exited) since Mon 2026-02-02 11:28:38 UTC; 56min ago
       Docs: https://github.com/openstack-k8s-operators/docs
   Main PID: 61654 (code=exited, status=0/SUCCESS)
        CPU: 2ms

Feb 02 11:28:38 compute-0 systemd[1]: Starting EDPM Container Shutdown...
Feb 02 11:28:38 compute-0 systemd[1]: Finished EDPM Container Shutdown.

○ edpm_libvirt_guests.service - Suspend libvirt Guests in edpm
     Loaded: loaded (/etc/systemd/system/edpm_libvirt_guests.service; enabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:libvirtd(8)
             https://libvirt.org

● edpm_nova_compute.service - nova_compute container
     Loaded: loaded (/etc/systemd/system/edpm_nova_compute.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 11:47:18 UTC; 37min ago
    Process: 238868 ExecStart=/usr/bin/podman start nova_compute (code=exited, status=0/SUCCESS)
   Main PID: 238883 (conmon)
         IO: 0B read, 86.5K written
      Tasks: 1 (limit: 48560)
     Memory: 684.0K (peak: 17.0M)
        CPU: 1.163s
     CGroup: /system.slice/edpm_nova_compute.service
             └─238883 /usr/bin/conmon --api-version 1 -c e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07 -u e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07/userdata -p /run/containers/storage/overlay-containers/e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07

Feb 02 12:24:44 compute-0 nova_compute[238883]: 2026-02-02 12:24:44.500 238887 DEBUG nova.compute.resource_tracker [None req-2db00bf8-a3a7-4a5c-8821-4d324af3ae5a - - - - - -] Total usable vcpus: 8, total allocated vcpus: 0 _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1057[00m
Feb 02 12:24:44 compute-0 nova_compute[238883]: 2026-02-02 12:24:44.500 238887 DEBUG nova.compute.resource_tracker [None req-2db00bf8-a3a7-4a5c-8821-4d324af3ae5a - - - - - -] Final resource view: name=compute-0.ctlplane.example.com phys_ram=7679MB used_ram=512MB phys_disk=59GB used_disk=0GB total_vcpus=8 used_vcpus=0 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066[00m
Feb 02 12:24:44 compute-0 nova_compute[238883]: 2026-02-02 12:24:44.520 238887 DEBUG oslo_concurrency.processutils [None req-2db00bf8-a3a7-4a5c-8821-4d324af3ae5a - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384[00m
Feb 02 12:24:45 compute-0 nova_compute[238883]: 2026-02-02 12:24:45.064 238887 DEBUG oslo_concurrency.processutils [None req-2db00bf8-a3a7-4a5c-8821-4d324af3ae5a - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.544s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422[00m
Feb 02 12:24:45 compute-0 nova_compute[238883]: 2026-02-02 12:24:45.072 238887 DEBUG nova.compute.provider_tree [None req-2db00bf8-a3a7-4a5c-8821-4d324af3ae5a - - - - - -] Inventory has not changed in ProviderTree for provider: 30401227-b88f-415d-9c2d-3119bd1baf61 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180[00m
Feb 02 12:24:45 compute-0 nova_compute[238883]: 2026-02-02 12:24:45.094 238887 DEBUG nova.scheduler.client.report [None req-2db00bf8-a3a7-4a5c-8821-4d324af3ae5a - - - - - -] Inventory has not changed for provider 30401227-b88f-415d-9c2d-3119bd1baf61 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7679, 'reserved': 512, 'min_unit': 1, 'max_unit': 7679, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940[00m
Feb 02 12:24:45 compute-0 nova_compute[238883]: 2026-02-02 12:24:45.096 238887 DEBUG nova.compute.resource_tracker [None req-2db00bf8-a3a7-4a5c-8821-4d324af3ae5a - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995[00m
Feb 02 12:24:45 compute-0 nova_compute[238883]: 2026-02-02 12:24:45.097 238887 DEBUG oslo_concurrency.lockutils [None req-2db00bf8-a3a7-4a5c-8821-4d324af3ae5a - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.686s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Feb 02 12:24:48 compute-0 nova_compute[238883]: 2026-02-02 12:24:48.036 238887 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 12:24:48 compute-0 nova_compute[238883]: 2026-02-02 12:24:48.388 238887 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m

● edpm_ovn_controller.service - ovn_controller container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_controller.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 11:40:09 UTC; 44min ago
   Main PID: 145056 (conmon)
         IO: 0B read, 144.0K written
      Tasks: 1 (limit: 48560)
     Memory: 696.0K (peak: 19.8M)
        CPU: 257ms
     CGroup: /system.slice/edpm_ovn_controller.service
             └─145056 /usr/bin/conmon --api-version 1 -c dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059 -u dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059/userdata -p /run/containers/storage/overlay-containers/dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059

Feb 02 12:12:26 compute-0 ovn_controller[145056]: 2026-02-02T12:12:26Z|00285|binding|INFO|Setting lport 900e2f84-b3d4-4547-bc57-6f2929841348 up in Southbound
Feb 02 12:12:26 compute-0 ovn_controller[145056]: 2026-02-02T12:12:26Z|00286|binding|INFO|Releasing lport 310ea7d5-de1c-4059-9f23-e1aced8de783 from this chassis (sb_readonly=0)
Feb 02 12:12:30 compute-0 ovn_controller[145056]: 2026-02-02T12:12:30Z|00287|binding|INFO|Releasing lport 310ea7d5-de1c-4059-9f23-e1aced8de783 from this chassis (sb_readonly=0)
Feb 02 12:12:40 compute-0 ovn_controller[145056]: 2026-02-02T12:12:40Z|00072|pinctrl(ovn_pinctrl0)|INFO|DHCPOFFER fa:16:3e:c9:3a:08 10.100.0.10
Feb 02 12:12:40 compute-0 ovn_controller[145056]: 2026-02-02T12:12:40Z|00073|pinctrl(ovn_pinctrl0)|INFO|DHCPACK fa:16:3e:c9:3a:08 10.100.0.10
Feb 02 12:13:00 compute-0 ovn_controller[145056]: 2026-02-02T12:13:00Z|00288|memory_trim|INFO|Detected inactivity (last active 30006 ms ago): trimming memory
Feb 02 12:13:32 compute-0 ovn_controller[145056]: 2026-02-02T12:13:32Z|00289|binding|INFO|Releasing lport 900e2f84-b3d4-4547-bc57-6f2929841348 from this chassis (sb_readonly=0)
Feb 02 12:13:32 compute-0 ovn_controller[145056]: 2026-02-02T12:13:32Z|00290|binding|INFO|Setting lport 900e2f84-b3d4-4547-bc57-6f2929841348 down in Southbound
Feb 02 12:13:32 compute-0 ovn_controller[145056]: 2026-02-02T12:13:32Z|00291|binding|INFO|Removing iface tap900e2f84-b3 ovn-installed in OVS
Feb 02 12:14:12 compute-0 ovn_controller[145056]: 2026-02-02T12:14:12Z|00292|memory_trim|INFO|Detected inactivity (last active 30001 ms ago): trimming memory

● edpm_ovn_metadata_agent.service - ovn_metadata_agent container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_metadata_agent.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 11:41:07 UTC; 43min ago
   Main PID: 155006 (conmon)
         IO: 0B read, 112.0K written
      Tasks: 1 (limit: 48560)
     Memory: 720.0K (peak: 19.1M)
        CPU: 342ms
     CGroup: /system.slice/edpm_ovn_metadata_agent.service
             └─155006 /usr/bin/conmon --api-version 1 -c f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9 -u f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9/userdata -p /run/containers/storage/overlay-containers/f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9

Feb 02 12:21:10 compute-0 ovn_metadata_agent[155006]: 2026-02-02 12:21:10.051 155011 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Feb 02 12:22:10 compute-0 ovn_metadata_agent[155006]: 2026-02-02 12:22:10.051 155011 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Feb 02 12:22:10 compute-0 ovn_metadata_agent[155006]: 2026-02-02 12:22:10.052 155011 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Feb 02 12:22:10 compute-0 ovn_metadata_agent[155006]: 2026-02-02 12:22:10.052 155011 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Feb 02 12:23:10 compute-0 ovn_metadata_agent[155006]: 2026-02-02 12:23:10.052 155011 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Feb 02 12:23:10 compute-0 ovn_metadata_agent[155006]: 2026-02-02 12:23:10.053 155011 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Feb 02 12:23:10 compute-0 ovn_metadata_agent[155006]: 2026-02-02 12:23:10.053 155011 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Feb 02 12:24:10 compute-0 ovn_metadata_agent[155006]: 2026-02-02 12:24:10.053 155011 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Feb 02 12:24:10 compute-0 ovn_metadata_agent[155006]: 2026-02-02 12:24:10.055 155011 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Feb 02 12:24:10 compute-0 ovn_metadata_agent[155006]: 2026-02-02 12:24:10.055 155011 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m

○ emergency.service - Emergency Shell
Unit fcoe.service could not be found.
     Loaded: loaded (/usr/lib/systemd/system/emergency.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

○ f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9-30b77556ff7903ab.service - /usr/bin/podman healthcheck run f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9
     Loaded: loaded (/run/systemd/transient/f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9-30b77556ff7903ab.service; transient)
  Transient: yes
     Active: inactive (dead) since Mon 2026-02-02 12:24:45 UTC; 6s ago
   Duration: 77ms
TriggeredBy: ● f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9-30b77556ff7903ab.timer
    Process: 293549 ExecStart=/usr/bin/podman healthcheck run f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9 (code=exited, status=0/SUCCESS)
   Main PID: 293549 (code=exited, status=0/SUCCESS)
        CPU: 75ms

Feb 02 12:24:45 compute-0 podman[293549]: 2026-02-02 12:24:45.04804548 +0000 UTC m=+0.055500851 container health_status f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_metadata_agent, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, container_name=ovn_metadata_agent, managed_by=edpm_ansible, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260127, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': 'f753062280449c359ff4c2dce751de4cb0e8717503110c3ea49626eae4ec2b5b-af67468a64521187a3d19b349040e64b8ebe04ca093912f980732f2b3e0883e2-af67468a64521187a3d19b349040e64b8ebe04ca093912f980732f2b3e0883e2-af67468a64521187a3d19b349040e64b8ebe04ca093912f980732f2b3e0883e2-0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/openstack/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', 
'/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']})

● getty@tty1.service - Getty on tty1
     Loaded: loaded (/usr/lib/systemd/system/getty@.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 10:52:44 UTC; 1h 32min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1014 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 256.0K (peak: 732.0K)
        CPU: 7ms
     CGroup: /system.slice/system-getty.slice/getty@tty1.service
             └─1014 /sbin/agetty -o "-p -- \\u" --noclear - linux

Feb 02 10:52:44 np0005604943.novalocal systemd[1]: Started Getty on tty1.

Unit hv_kvp_daemon.service could not be found.
● gssproxy.service - GSSAPI Proxy Daemon
     Loaded: loaded (/usr/lib/systemd/system/gssproxy.service; disabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 10:52:41 UTC; 1h 32min ago
   Main PID: 876 (gssproxy)
         IO: 0B read, 0B written
      Tasks: 6 (limit: 48560)
     Memory: 1.8M (peak: 3.4M)
        CPU: 12ms
     CGroup: /system.slice/gssproxy.service
             └─876 /usr/sbin/gssproxy -D

Feb 02 10:52:41 np0005604943.novalocal systemd[1]: Starting GSSAPI Proxy Daemon...
Feb 02 10:52:41 np0005604943.novalocal systemd[1]: Started GSSAPI Proxy Daemon.

○ import-state.service - Import network configuration from initramfs
     Loaded: loaded (/usr/lib/systemd/system/import-state.service; enabled; preset: enabled)
     Active: inactive (dead)

○ initrd-cleanup.service - Cleaning Up and Shutting Down Daemons
     Loaded: loaded (/usr/lib/systemd/system/initrd-cleanup.service; static)
     Active: inactive (dead) since Mon 2026-02-02 10:52:37 UTC; 1h 32min ago
   Main PID: 618 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Feb 02 10:52:37 localhost systemd[1]: Starting Cleaning Up and Shutting Down Daemons...
Feb 02 10:52:37 localhost systemd[1]: initrd-cleanup.service: Deactivated successfully.
Feb 02 10:52:37 localhost systemd[1]: Finished Cleaning Up and Shutting Down Daemons.

○ initrd-parse-etc.service - Mountpoints Configured in the Real Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-parse-etc.service; static)
     Active: inactive (dead) since Mon 2026-02-02 10:52:37 UTC; 1h 32min ago
   Main PID: 570 (code=exited, status=0/SUCCESS)
        CPU: 11ms

Feb 02 10:52:37 localhost systemd[1]: Starting Mountpoints Configured in the Real Root...
Feb 02 10:52:37 localhost systemd[1]: initrd-parse-etc.service: Deactivated successfully.
Feb 02 10:52:37 localhost systemd[1]: Finished Mountpoints Configured in the Real Root.

○ initrd-switch-root.service - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.service; static)
     Active: inactive (dead) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
   Main PID: 622 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Feb 02 10:52:37 localhost systemd[1]: Starting Switch Root...

○ initrd-udevadm-cleanup-db.service - Cleanup udev Database
     Loaded: loaded (/usr/lib/systemd/system/initrd-udevadm-cleanup-db.service; static)
     Active: inactive (dead) since Mon 2026-02-02 10:52:37 UTC; 1h 32min ago
   Main PID: 620 (code=exited, status=0/SUCCESS)
        CPU: 5ms

Feb 02 10:52:37 localhost systemd[1]: Starting Cleanup udev Database...
Feb 02 10:52:37 localhost systemd[1]: initrd-udevadm-cleanup-db.service: Deactivated successfully.
Feb 02 10:52:37 localhost systemd[1]: Finished Cleanup udev Database.

○ ip6tables.service - IPv6 firewall with ip6tables
     Loaded: loaded (/usr/lib/systemd/system/ip6tables.service; disabled; preset: disabled)
     Active: inactive (dead)

○ iptables.service - IPv4 firewall with iptables
     Loaded: loaded (/usr/lib/systemd/system/iptables.service; disabled; preset: disabled)
     Active: inactive (dead) since Mon 2026-02-02 11:28:45 UTC; 56min ago
   Duration: 36min 5.290s
   Main PID: 781 (code=exited, status=0/SUCCESS)
        CPU: 84ms

Feb 02 10:52:39 localhost systemd[1]: Starting IPv4 firewall with iptables...
Feb 02 10:52:40 localhost iptables.init[781]: iptables: Applying firewall rules: [  OK  ]
Feb 02 10:52:40 localhost systemd[1]: Finished IPv4 firewall with iptables.
Feb 02 11:28:45 compute-0 systemd[1]: Stopping IPv4 firewall with iptables...
Feb 02 11:28:45 compute-0 iptables.init[62902]: iptables: Setting chains to policy ACCEPT: raw mangle filter nat [  OK  ]
Feb 02 11:28:45 compute-0 iptables.init[62902]: iptables: Flushing firewall rules: [  OK  ]
Feb 02 11:28:45 compute-0 systemd[1]: iptables.service: Deactivated successfully.
Feb 02 11:28:45 compute-0 systemd[1]: Stopped IPv4 firewall with iptables.

● irqbalance.service - irqbalance daemon
     Loaded: loaded (/usr/lib/systemd/system/irqbalance.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
       Docs: man:irqbalance(1)
             https://github.com/Irqbalance/irqbalance
   Main PID: 782 (irqbalance)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48560)
     Memory: 1.1M (peak: 1.5M)
        CPU: 378ms
     CGroup: /system.slice/irqbalance.service
             └─782 /usr/sbin/irqbalance

Feb 02 10:52:50 np0005604943.novalocal irqbalance[782]: Cannot change IRQ 28 affinity: Operation not permitted
Feb 02 10:52:50 np0005604943.novalocal irqbalance[782]: IRQ 28 affinity is now unmanaged
Feb 02 10:52:50 np0005604943.novalocal irqbalance[782]: Cannot change IRQ 34 affinity: Operation not permitted
Feb 02 10:52:50 np0005604943.novalocal irqbalance[782]: IRQ 34 affinity is now unmanaged
Feb 02 10:52:50 np0005604943.novalocal irqbalance[782]: Cannot change IRQ 32 affinity: Operation not permitted
Feb 02 10:52:50 np0005604943.novalocal irqbalance[782]: IRQ 32 affinity is now unmanaged
Feb 02 10:52:50 np0005604943.novalocal irqbalance[782]: Cannot change IRQ 30 affinity: Operation not permitted
Feb 02 10:52:50 np0005604943.novalocal irqbalance[782]: IRQ 30 affinity is now unmanaged
Feb 02 10:52:50 np0005604943.novalocal irqbalance[782]: Cannot change IRQ 29 affinity: Operation not permitted
Feb 02 10:52:50 np0005604943.novalocal irqbalance[782]: IRQ 29 affinity is now unmanaged

○ iscsi-init.service - One time configuration for iscsi.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-init.service; disabled; preset: disabled)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 11:46:00 UTC; 38min ago

Feb 02 11:45:30 compute-0 systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).
Feb 02 11:46:00 compute-0 systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).

○ iscsi-onboot.service - Special handling of early boot iSCSI sessions
     Loaded: loaded (/usr/lib/systemd/system/iscsi-onboot.service; enabled; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsi-shutdown.service - Logout off all iSCSI sessions on shutdown
     Loaded: loaded (/usr/lib/systemd/system/iscsi-shutdown.service; static)
     Active: active (exited) since Mon 2026-02-02 11:45:30 UTC; 39min ago
       Docs: man:iscsid(8)
             man:iscsiadm(8)
   Main PID: 216802 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Feb 02 11:45:30 compute-0 systemd[1]: Starting Logout off all iSCSI sessions on shutdown...
Feb 02 11:45:30 compute-0 systemd[1]: Finished Logout off all iSCSI sessions on shutdown.

○ iscsi-starter.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-starter.service; enabled; preset: disabled)
     Active: inactive (dead)

○ iscsi.service - Login and scanning of iSCSI devices
     Loaded: loaded (/usr/lib/systemd/system/iscsi.service; indirect; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsid.service - Open-iSCSI
     Loaded: loaded (/usr/lib/systemd/system/iscsid.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 11:46:00 UTC; 38min ago
TriggeredBy: ● iscsid.socket
       Docs: man:iscsid(8)
             man:iscsiuio(8)
             man:iscsiadm(8)
   Main PID: 223533 (iscsid)
     Status: "Ready to process requests"
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 1.9M (peak: 2.0M)
        CPU: 5ms
     CGroup: /system.slice/iscsid.service
             └─223533 /usr/sbin/iscsid -f

Feb 02 11:46:00 compute-0 systemd[1]: Starting Open-iSCSI...
Feb 02 11:46:00 compute-0 systemd[1]: Started Open-iSCSI.

○ iscsiuio.service - iSCSI UserSpace I/O driver
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ iscsiuio.socket
       Docs: man:iscsiuio(8)

Unit lvm2-activation-early.service could not be found.
● kdump.service - Crash recovery kernel arming
     Loaded: loaded (/usr/lib/systemd/system/kdump.service; enabled; preset: enabled)
     Active: active (exited) since Mon 2026-02-02 10:52:53 UTC; 1h 31min ago
   Main PID: 1012 (code=exited, status=0/SUCCESS)
        CPU: 14.634s

Feb 02 10:52:53 np0005604943.novalocal dracut[1271]: Linked:         0 files
Feb 02 10:52:53 np0005604943.novalocal dracut[1271]: Compared:       0 xattrs
Feb 02 10:52:53 np0005604943.novalocal dracut[1271]: Compared:       0 files
Feb 02 10:52:53 np0005604943.novalocal dracut[1271]: Saved:          0 B
Feb 02 10:52:53 np0005604943.novalocal dracut[1271]: Duration:       0.000221 seconds
Feb 02 10:52:53 np0005604943.novalocal dracut[1271]: *** Hardlinking files done ***
Feb 02 10:52:53 np0005604943.novalocal dracut[1271]: *** Creating initramfs image file '/boot/initramfs-5.14.0-665.el9.x86_64kdump.img' done ***
Feb 02 10:52:53 np0005604943.novalocal kdumpctl[1022]: kdump: kexec: loaded kdump kernel
Feb 02 10:52:53 np0005604943.novalocal kdumpctl[1022]: kdump: Starting kdump: [OK]
Feb 02 10:52:53 np0005604943.novalocal systemd[1]: Finished Crash recovery kernel arming.

● kmod-static-nodes.service - Create List of Static Device Nodes
     Loaded: loaded (/usr/lib/systemd/system/kmod-static-nodes.service; static)
     Active: active (exited) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
   Main PID: 674 (code=exited, status=0/SUCCESS)
        CPU: 6ms

Feb 02 10:52:38 localhost systemd[1]: Finished Create List of Static Device Nodes.

● ldconfig.service - Rebuild Dynamic Linker Cache
     Loaded: loaded (/usr/lib/systemd/system/ldconfig.service; static)
     Active: active (exited) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
       Docs: man:ldconfig(8)
   Main PID: 697 (code=exited, status=0/SUCCESS)
        CPU: 40ms

Feb 02 10:52:39 localhost systemd[1]: Starting Rebuild Dynamic Linker Cache...
Feb 02 10:52:39 localhost systemd[1]: Finished Rebuild Dynamic Linker Cache.

○ libvirtd.service
     Loaded: masked (Reason: Unit libvirtd.service is masked.)
     Active: inactive (dead)
TriggeredBy: ○ libvirtd.socket
             ○ libvirtd-admin.socket
             ○ libvirtd-ro.socket

○ loadmodules.service - Load legacy module configuration
     Loaded: loaded (/usr/lib/systemd/system/loadmodules.service; enabled; preset: enabled)
     Active: inactive (dead)

○ logrotate.service - Rotate log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.service; static)
     Active: inactive (dead)
TriggeredBy: ● logrotate.timer
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

○ lvm2-lvmpolld.service - LVM2 poll daemon
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.service; static)
     Active: inactive (dead)
TriggeredBy: ● lvm2-lvmpolld.socket
       Docs: man:lvmpolld(8)

● lvm2-monitor.service - Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling
     Loaded: loaded (/usr/lib/systemd/system/lvm2-monitor.service; enabled; preset: enabled)
     Active: active (exited) since Mon 2026-02-02 11:24:35 UTC; 1h 0min ago
       Docs: man:dmeventd(8)
             man:lvcreate(8)
             man:lvchange(8)
             man:vgchange(8)
   Main PID: 34169 (code=exited, status=0/SUCCESS)
        CPU: 31ms

Feb 02 11:24:35 compute-0 systemd[1]: Starting Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling...
Feb 02 11:24:35 compute-0 systemd[1]: Finished Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling.

○ mdmonitor.service - Software RAID monitoring and management
     Loaded: loaded (/usr/lib/systemd/system/mdmonitor.service; enabled; preset: enabled)
     Active: inactive (dead)

○ microcode.service - Load CPU microcode update
     Loaded: loaded (/usr/lib/systemd/system/microcode.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 10:52:39 UTC; 1h 32min ago

Feb 02 10:52:39 localhost systemd[1]: Load CPU microcode update was skipped because of an unmet condition check (ConditionPathExists=/sys/devices/system/cpu/microcode/reload).

○ modprobe@configfs.service - Load Kernel Module configfs
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
       Docs: man:modprobe(8)
   Main PID: 772 (code=exited, status=0/SUCCESS)
        CPU: 4ms

Feb 02 10:52:39 localhost systemd[1]: Starting Load Kernel Module configfs...
Feb 02 10:52:39 localhost systemd[1]: modprobe@configfs.service: Deactivated successfully.
Feb 02 10:52:39 localhost systemd[1]: Finished Load Kernel Module configfs.

○ modprobe@drm.service - Load Kernel Module drm
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:modprobe(8)
   Main PID: 676 (code=exited, status=0/SUCCESS)
        CPU: 79ms

Feb 02 10:52:38 localhost systemd[1]: modprobe@drm.service: Deactivated successfully.
Feb 02 10:52:38 localhost systemd[1]: Finished Load Kernel Module drm.

○ modprobe@efi_pstore.service - Load Kernel Module efi_pstore
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:modprobe(8)
   Main PID: 677 (code=exited, status=0/SUCCESS)
        CPU: 2ms

Feb 02 10:52:38 localhost systemd[1]: modprobe@efi_pstore.service: Deactivated successfully.
Feb 02 10:52:38 localhost systemd[1]: Finished Load Kernel Module efi_pstore.

○ modprobe@fuse.service - Load Kernel Module fuse
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:modprobe(8)
   Main PID: 678 (code=exited, status=0/SUCCESS)
        CPU: 19ms

Feb 02 10:52:38 localhost systemd[1]: modprobe@fuse.service: Deactivated successfully.
Feb 02 10:52:38 localhost systemd[1]: Finished Load Kernel Module fuse.

● multipathd.service - Device-Mapper Multipath Device Controller
     Loaded: loaded (/usr/lib/systemd/system/multipathd.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 11:46:01 UTC; 38min ago
TriggeredBy: ● multipathd.socket
   Main PID: 223815 (multipathd)
     Status: "up"
         IO: 0B read, 0B written
      Tasks: 7
     Memory: 18.4M (peak: 18.8M)
        CPU: 350ms
     CGroup: /system.slice/multipathd.service
             └─223815 /sbin/multipathd -d -s

Feb 02 11:46:01 compute-0 systemd[1]: Starting Device-Mapper Multipath Device Controller...
Feb 02 11:46:01 compute-0 multipathd[223815]: --------start up--------
Feb 02 11:46:01 compute-0 multipathd[223815]: read /etc/multipath.conf
Feb 02 11:46:01 compute-0 multipathd[223815]: path checkers start up
Feb 02 11:46:01 compute-0 systemd[1]: Started Device-Mapper Multipath Device Controller.

○ netns-placeholder.service - Create netns directory
     Loaded: loaded (/etc/systemd/system/netns-placeholder.service; enabled; preset: enabled)
     Active: inactive (dead) since Mon 2026-02-02 11:40:45 UTC; 44min ago
   Main PID: 152171 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Feb 02 11:40:45 compute-0 systemd[1]: Starting Create netns directory...
Feb 02 11:40:45 compute-0 systemd[1]: netns-placeholder.service: Deactivated successfully.
Feb 02 11:40:45 compute-0 systemd[1]: Finished Create netns directory.

○ network.service - LSB: Bring up/down networking
     Loaded: loaded (/etc/rc.d/init.d/network; generated)
     Active: inactive (dead)
       Docs: man:systemd-sysv-generator(8)

● NetworkManager-wait-online.service - Network Manager Wait Online
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager-wait-online.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 11:26:54 UTC; 57min ago
       Docs: man:NetworkManager-wait-online.service(8)
   Main PID: 49112 (code=exited, status=0/SUCCESS)
        CPU: 33ms

Feb 02 11:26:54 compute-0 systemd[1]: Starting Network Manager Wait Online...
Feb 02 11:26:54 compute-0 systemd[1]: Finished Network Manager Wait Online.

● NetworkManager.service - Network Manager
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager.service; enabled; preset: enabled)
    Drop-In: /usr/lib/systemd/system/NetworkManager.service.d
             └─NetworkManager-ovs.conf
     Active: active (running) since Mon 2026-02-02 11:26:54 UTC; 57min ago
       Docs: man:NetworkManager(8)
   Main PID: 49093 (NetworkManager)
         IO: 104.0K read, 334.5K written
      Tasks: 3 (limit: 48560)
     Memory: 5.7M (peak: 7.6M)
        CPU: 33.703s
     CGroup: /system.slice/NetworkManager.service
             └─49093 /usr/sbin/NetworkManager --no-daemon

Feb 02 12:12:25 compute-0 NetworkManager[49093]: <info>  [1770034345.5310] manager: (tap900e2f84-b3): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/142)
Feb 02 12:12:26 compute-0 NetworkManager[49093]: <info>  [1770034346.2783] manager: (tap900e2f84-b3): new Tun device (/org/freedesktop/NetworkManager/Devices/143)
Feb 02 12:12:26 compute-0 NetworkManager[49093]: <info>  [1770034346.3701] device (tap900e2f84-b3): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Feb 02 12:12:26 compute-0 NetworkManager[49093]: <info>  [1770034346.3708] device (tap900e2f84-b3): state change: unavailable -> disconnected (reason 'none', managed-type: 'external')
Feb 02 12:12:26 compute-0 NetworkManager[49093]: <info>  [1770034346.4341] manager: (tapc59f5e49-00): new Veth device (/org/freedesktop/NetworkManager/Devices/144)
Feb 02 12:12:26 compute-0 NetworkManager[49093]: <info>  [1770034346.5014] device (tapc59f5e49-00): carrier: link connected
Feb 02 12:12:26 compute-0 NetworkManager[49093]: <info>  [1770034346.6552] manager: (tapc59f5e49-00): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/145)
Feb 02 12:12:30 compute-0 NetworkManager[49093]: <info>  [1770034350.6459] manager: (patch-br-int-to-provnet-b083f27c-a844-4e95-81ce-0ce80ab4824b): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/146)
Feb 02 12:12:30 compute-0 NetworkManager[49093]: <info>  [1770034350.6469] manager: (patch-provnet-b083f27c-a844-4e95-81ce-0ce80ab4824b-to-br-int): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/147)
Feb 02 12:13:32 compute-0 NetworkManager[49093]: <info>  [1770034412.6358] device (tap900e2f84-b3): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')

○ nfs-idmapd.service - NFSv4 ID-name mapping service
     Loaded: loaded (/usr/lib/systemd/system/nfs-idmapd.service; static)
     Active: inactive (dead)
       Docs: man:idmapd(8)

○ nfs-mountd.service - NFS Mount Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfs-mountd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.mountd(8)

○ nfs-server.service - NFS server and services
     Loaded: loaded (/usr/lib/systemd/system/nfs-server.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:rpc.nfsd(8)
             man:exportfs(8)

○ nfs-utils.service - NFS server and client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-utils.service; static)
     Active: inactive (dead)

○ nfsdcld.service - NFSv4 Client Tracking Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfsdcld.service; static)
     Active: inactive (dead)
       Docs: man:nfsdcld(8)

● nftables.service - Netfilter Tables
     Loaded: loaded (/usr/lib/systemd/system/nftables.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 11:28:47 UTC; 56min ago
       Docs: man:nft(8)
   Main PID: 63292 (code=exited, status=0/SUCCESS)
        CPU: 24ms

Feb 02 11:28:47 compute-0 systemd[1]: Starting Netfilter Tables...
Feb 02 11:28:47 compute-0 systemd[1]: Finished Netfilter Tables.

● nis-domainname.service - Read and set NIS domainname from /etc/sysconfig/network
     Loaded: loaded (/usr/lib/systemd/system/nis-domainname.service; enabled; preset: enabled)
Unit ntpd.service could not be found.
Unit ntpdate.service could not be found.
     Active: active (exited) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
   Main PID: 679 (code=exited, status=0/SUCCESS)
        CPU: 4ms

Feb 02 10:52:38 localhost systemd[1]: Finished Read and set NIS domainname from /etc/sysconfig/network.

○ nvmefc-boot-connections.service - Auto-connect to subsystems on FC-NVME devices found during boot
     Loaded: loaded (/usr/lib/systemd/system/nvmefc-boot-connections.service; enabled; preset: enabled)
     Active: inactive (dead)

● openvswitch.service - Open vSwitch
     Loaded: loaded (/usr/lib/systemd/system/openvswitch.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 11:26:41 UTC; 58min ago
   Main PID: 47407 (code=exited, status=0/SUCCESS)
        CPU: 1ms

Feb 02 11:26:41 compute-0 systemd[1]: Starting Open vSwitch...
Feb 02 11:26:41 compute-0 systemd[1]: Finished Open vSwitch.

● ovs-delete-transient-ports.service - Open vSwitch Delete Transient Ports
     Loaded: loaded (/usr/lib/systemd/system/ovs-delete-transient-ports.service; static)
     Active: active (exited) since Mon 2026-02-02 11:26:41 UTC; 58min ago
   Main PID: 47345 (code=exited, status=0/SUCCESS)
        CPU: 27ms

Feb 02 11:26:41 compute-0 systemd[1]: Starting Open vSwitch Delete Transient Ports...
Feb 02 11:26:41 compute-0 systemd[1]: Finished Open vSwitch Delete Transient Ports.

● ovs-vswitchd.service - Open vSwitch Forwarding Unit
     Loaded: loaded (/usr/lib/systemd/system/ovs-vswitchd.service; static)
     Active: active (running) since Mon 2026-02-02 11:26:41 UTC; 58min ago
   Main PID: 47398 (ovs-vswitchd)
         IO: 3.4M read, 404.0K written
      Tasks: 13 (limit: 48560)
     Memory: 246.6M (peak: 248.9M)
        CPU: 13.401s
     CGroup: /system.slice/ovs-vswitchd.service
             └─47398 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach

Feb 02 11:26:41 compute-0 systemd[1]: Starting Open vSwitch Forwarding Unit...
Feb 02 11:26:41 compute-0 ovs-ctl[47388]: Inserting openvswitch module [  OK  ]
Feb 02 11:26:41 compute-0 ovs-ctl[47357]: Starting ovs-vswitchd [  OK  ]
Feb 02 11:26:41 compute-0 ovs-ctl[47357]: Enabling remote OVSDB managers [  OK  ]
Feb 02 11:26:41 compute-0 systemd[1]: Started Open vSwitch Forwarding Unit.
Feb 02 11:26:41 compute-0 ovs-vsctl[47406]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0

● ovsdb-server.service - Open vSwitch Database Unit
     Loaded: loaded (/usr/lib/systemd/system/ovsdb-server.service; static)
     Active: active (running) since Mon 2026-02-02 11:26:41 UTC; 58min ago
   Main PID: 47317 (ovsdb-server)
         IO: 1.2M read, 723.0K written
      Tasks: 1 (limit: 48560)
     Memory: 5.0M (peak: 40.8M)
        CPU: 12.288s
     CGroup: /system.slice/ovsdb-server.service
             └─47317 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach

Feb 02 11:26:41 compute-0 chown[47263]: /usr/bin/chown: cannot access '/run/openvswitch': No such file or directory
Feb 02 11:26:41 compute-0 ovs-ctl[47268]: /etc/openvswitch/conf.db does not exist ... (warning).
Feb 02 11:26:41 compute-0 ovs-ctl[47268]: Creating empty database /etc/openvswitch/conf.db [  OK  ]
Feb 02 11:26:41 compute-0 ovs-ctl[47268]: Starting ovsdb-server [  OK  ]
Feb 02 11:26:41 compute-0 ovs-vsctl[47318]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait -- init -- set Open_vSwitch . db-version=8.5.1
Feb 02 11:26:41 compute-0 ovs-vsctl[47338]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait set Open_vSwitch . ovs-version=3.3.5-115.el9s "external-ids:system-id=\"63c28000-4b99-40fb-b19f-6b3ba1922f6d\"" "external-ids:rundir=\"/var/run/openvswitch\"" "system-type=\"centos\"" "system-version=\"9\""
Unit pacemaker.service could not be found.
Unit plymouth-quit-wait.service could not be found.
Unit plymouth-start.service could not be found.
Unit power-profiles-daemon.service could not be found.
Feb 02 11:26:41 compute-0 ovs-ctl[47268]: Configuring Open vSwitch system IDs [  OK  ]
Feb 02 11:26:41 compute-0 ovs-vsctl[47344]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0
Feb 02 11:26:41 compute-0 ovs-ctl[47268]: Enabling remote OVSDB managers [  OK  ]
Feb 02 11:26:41 compute-0 systemd[1]: Started Open vSwitch Database Unit.

● polkit.service - Authorization Manager
     Loaded: loaded (/usr/lib/systemd/system/polkit.service; static)
     Active: active (running) since Mon 2026-02-02 11:26:03 UTC; 58min ago
       Docs: man:polkit(8)
   Main PID: 43582 (polkitd)
         IO: 19.2M read, 0B written
      Tasks: 12 (limit: 48560)
     Memory: 25.2M (peak: 26.5M)
        CPU: 1.856s
     CGroup: /system.slice/polkit.service
             └─43582 /usr/lib/polkit-1/polkitd --no-debug

Feb 02 11:43:22 compute-0 polkitd[43582]: Collecting garbage unconditionally...
Feb 02 11:43:22 compute-0 polkitd[43582]: Loading rules from directory /etc/polkit-1/rules.d
Feb 02 11:43:22 compute-0 polkitd[43582]: Loading rules from directory /usr/share/polkit-1/rules.d
Feb 02 11:43:22 compute-0 polkitd[43582]: Finished loading, compiling and executing 3 rules
Feb 02 11:44:41 compute-0 polkitd[43582]: Registered Authentication Agent for unix-process:208288:312733 (system bus name :1.2508 [pkttyagent --process 208288 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Feb 02 11:44:41 compute-0 polkitd[43582]: Unregistered Authentication Agent for unix-process:208288:312733 (system bus name :1.2508, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Feb 02 11:44:41 compute-0 polkitd[43582]: Registered Authentication Agent for unix-process:208287:312732 (system bus name :1.2509 [pkttyagent --process 208287 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Feb 02 11:44:41 compute-0 polkitd[43582]: Unregistered Authentication Agent for unix-process:208287:312732 (system bus name :1.2509, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Feb 02 11:44:43 compute-0 polkitd[43582]: Registered Authentication Agent for unix-process:208754:312920 (system bus name :1.2512 [pkttyagent --process 208754 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Feb 02 11:44:43 compute-0 polkitd[43582]: Unregistered Authentication Agent for unix-process:208754:312920 (system bus name :1.2512, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)

○ raid-check.service - RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.service; static)
     Active: inactive (dead)
TriggeredBy: ○ raid-check.timer

○ rbdmap.service - Map RBD devices
     Loaded: loaded (/usr/lib/systemd/system/rbdmap.service; disabled; preset: disabled)
     Active: inactive (dead)

○ rc-local.service - /etc/rc.d/rc.local Compatibility
     Loaded: loaded (/usr/lib/systemd/system/rc-local.service; static)
     Active: inactive (dead)
       Docs: man:systemd-rc-local-generator(8)

○ rescue.service - Rescue Shell
     Loaded: loaded (/usr/lib/systemd/system/rescue.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

○ rpc-gssd.service - RPC security service for NFS client and server
     Loaded: loaded (/usr/lib/systemd/system/rpc-gssd.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 10:52:41 UTC; 1h 32min ago
       Docs: man:rpc.gssd(8)

Feb 02 10:52:41 np0005604943.novalocal systemd[1]: RPC security service for NFS client and server was skipped because of an unmet condition check (ConditionPathExists=/etc/krb5.keytab).

Unit rpc-svcgssd.service could not be found.
● rpc-statd-notify.service - Notify NFS peers of a restart
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd-notify.service; static)
     Active: active (exited) since Mon 2026-02-02 10:52:44 UTC; 1h 32min ago
       Docs: man:sm-notify(8)
             man:rpc.statd(8)
        CPU: 6ms

Feb 02 10:52:44 np0005604943.novalocal systemd[1]: Starting Notify NFS peers of a restart...
Feb 02 10:52:44 np0005604943.novalocal sm-notify[1008]: Version 2.5.4 starting
Feb 02 10:52:44 np0005604943.novalocal systemd[1]: Started Notify NFS peers of a restart.

○ rpc-statd.service - NFS status monitor for NFSv2/3 locking.
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.statd(8)

● rpcbind.service - RPC Bind
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
TriggeredBy: ● rpcbind.socket
       Docs: man:rpcbind(8)
   Main PID: 702 (rpcbind)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 2.5M (peak: 2.8M)
        CPU: 34ms
     CGroup: /system.slice/rpcbind.service
             └─702 /usr/bin/rpcbind -w -f

Feb 02 10:52:39 localhost systemd[1]: Starting RPC Bind...
Feb 02 10:52:39 localhost systemd[1]: Started RPC Bind.

● rsyslog.service - System Logging Service
     Loaded: loaded (/usr/lib/systemd/system/rsyslog.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 10:52:44 UTC; 1h 32min ago
       Docs: man:rsyslogd(8)
             https://www.rsyslog.com/doc/
   Main PID: 1009 (rsyslogd)
         IO: 4.0K read, 20.1M written
      Tasks: 3 (limit: 48560)
     Memory: 21.0M (peak: 22.9M)
        CPU: 13.098s
     CGroup: /system.slice/rsyslog.service
             └─1009 /usr/sbin/rsyslogd -n

Feb 02 12:04:06 compute-0 rsyslogd[1009]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Feb 02 12:04:06 compute-0 rsyslogd[1009]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Feb 02 12:13:32 compute-0 rsyslogd[1009]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Feb 02 12:15:02 compute-0 rsyslogd[1009]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Feb 02 12:15:02 compute-0 rsyslogd[1009]: imjournal from <np0005604943:ceph-osd>: begin to drop messages due to rate-limiting
Feb 02 12:15:06 compute-0 rsyslogd[1009]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Feb 02 12:24:20 compute-0 rsyslogd[1009]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Feb 02 12:24:25 compute-0 rsyslogd[1009]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Feb 02 12:24:30 compute-0 rsyslogd[1009]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Feb 02 12:24:41 compute-0 rsyslogd[1009]: imjournal: 45203 messages lost due to rate-limiting (20000 allowed within 600 seconds)

○ selinux-autorelabel-mark.service - Mark the need to relabel after reboot
     Loaded: loaded (/usr/lib/systemd/system/selinux-autorelabel-mark.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 10:52:39 UTC; 1h 32min ago

Feb 02 10:52:39 localhost systemd[1]: Mark the need to relabel after reboot was skipped because of an unmet condition check (ConditionSecurity=!selinux).

● serial-getty@ttyS0.service - Serial Getty on ttyS0
     Loaded: loaded (/usr/lib/systemd/system/serial-getty@.service; enabled-runtime; preset: disabled)
     Active: active (running) since Mon 2026-02-02 10:52:44 UTC; 1h 32min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1015 (agetty)
         IO: 0B read, 0B written
Unit sntp.service could not be found.
Unit sshd-keygen.service could not be found.
      Tasks: 1 (limit: 48560)
     Memory: 260.0K (peak: 504.0K)
        CPU: 8ms
     CGroup: /system.slice/system-serial\x2dgetty.slice/serial-getty@ttyS0.service
             └─1015 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

Feb 02 10:52:44 np0005604943.novalocal systemd[1]: Started Serial Getty on ttyS0.

○ sshd-keygen@ecdsa.service - OpenSSH ecdsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 11:43:26 UTC; 41min ago

Feb 02 10:52:39 localhost systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Feb 02 11:43:26 compute-0 systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@ed25519.service - OpenSSH ed25519 Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 11:43:26 UTC; 41min ago

Feb 02 10:52:39 localhost systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Feb 02 11:43:26 compute-0 systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@rsa.service - OpenSSH rsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 11:43:26 UTC; 41min ago

Feb 02 10:52:39 localhost systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Feb 02 11:43:26 compute-0 systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

● sshd.service - OpenSSH server daemon
     Loaded: loaded (/usr/lib/systemd/system/sshd.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 11:43:26 UTC; 41min ago
       Docs: man:sshd(8)
             man:sshd_config(5)
   Main PID: 181254 (sshd)
         IO: 104.0K read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 3.5M (peak: 7.0M)
        CPU: 334ms
     CGroup: /system.slice/sshd.service
             └─181254 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"

Feb 02 12:14:40 compute-0 sshd-session[274297]: Accepted publickey for zuul from 192.168.122.10 port 40162 ssh2: ECDSA SHA256:06FbdFtHAPAeMOjyZUsVbjQFE9BZWSUY5SEUbp/6C6E
Feb 02 12:14:40 compute-0 sshd-session[274297]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Feb 02 12:16:02 compute-0 sshd-session[281810]: Accepted publickey for zuul from 192.168.122.10 port 35312 ssh2: ECDSA SHA256:06FbdFtHAPAeMOjyZUsVbjQFE9BZWSUY5SEUbp/6C6E
Feb 02 12:16:02 compute-0 sshd-session[281810]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Feb 02 12:16:02 compute-0 sshd-session[281810]: pam_unix(sshd:session): session closed for user zuul
Unit syslog.service could not be found.
Feb 02 12:16:02 compute-0 sshd-session[281839]: Accepted publickey for zuul from 192.168.122.10 port 35328 ssh2: ECDSA SHA256:06FbdFtHAPAeMOjyZUsVbjQFE9BZWSUY5SEUbp/6C6E
Feb 02 12:16:02 compute-0 sshd-session[281839]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Feb 02 12:16:02 compute-0 sshd-session[281839]: pam_unix(sshd:session): session closed for user zuul
Feb 02 12:24:05 compute-0 sshd-session[287512]: Accepted publickey for zuul from 192.168.122.10 port 41610 ssh2: ECDSA SHA256:06FbdFtHAPAeMOjyZUsVbjQFE9BZWSUY5SEUbp/6C6E
Feb 02 12:24:05 compute-0 sshd-session[287512]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)

○ sssd-kcm.service - SSSD Kerberos Cache Manager
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.service; indirect; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ● sssd-kcm.socket
       Docs: man:sssd-kcm(5)

○ sssd.service - System Security Services Daemon
     Loaded: loaded (/usr/lib/systemd/system/sssd.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 10:52:39 UTC; 1h 32min ago

Feb 02 10:52:39 localhost systemd[1]: System Security Services Daemon was skipped because no trigger condition checks were met.

○ sysstat-collect.service - system activity accounting tool
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-collect.timer
       Docs: man:sa1(8)

○ sysstat-summary.service - Generate a daily summary of process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-summary.timer
       Docs: man:sa2(8)

○ sysstat.service - Resets System Activity Logs
     Loaded: loaded (/usr/lib/systemd/system/sysstat.service; enabled; preset: enabled)
     Active: inactive (dead)

○ systemd-ask-password-console.service - Dispatch Password Requests to Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-console.path
       Docs: man:systemd-ask-password-console.service(8)

○ systemd-ask-password-wall.service - Forward Password Requests to Wall
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-wall.path
       Docs: man:systemd-ask-password-wall.service(8)

○ systemd-binfmt.service - Set Up Additional Binary Formats
     Loaded: loaded (/usr/lib/systemd/system/systemd-binfmt.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
       Docs: man:systemd-binfmt.service(8)
             man:binfmt.d(5)
             https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Feb 02 10:52:39 localhost systemd[1]: Set Up Additional Binary Formats was skipped because no trigger condition checks were met.

○ systemd-boot-random-seed.service - Update Boot Loader Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-random-seed.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
       Docs: man:systemd-boot-random-seed.service(8)
             man:random(4)

Feb 02 10:52:39 localhost systemd[1]: Update Boot Loader Random Seed was skipped because no trigger condition checks were met.

● systemd-boot-update.service - Automatic Boot Loader Update
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-update.service; enabled; preset: enabled)
     Active: active (exited) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
       Docs: man:bootctl(1)
   Main PID: 698 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Feb 02 10:52:39 localhost systemd[1]: Starting Automatic Boot Loader Update...
Feb 02 10:52:39 localhost bootctl[698]: Couldn't find EFI system partition, skipping.
Feb 02 10:52:39 localhost systemd[1]: Finished Automatic Boot Loader Update.

○ systemd-firstboot.service - First Boot Wizard
     Loaded: loaded (/usr/lib/systemd/system/systemd-firstboot.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:systemd-firstboot(1)

Feb 02 10:52:38 localhost systemd[1]: First Boot Wizard was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ systemd-fsck-root.service - File System Check on Root Device
     Loaded: loaded (/usr/lib/systemd/system/systemd-fsck-root.service; static)
     Active: inactive (dead) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
   Duration: 1.625s
       Docs: man:systemd-fsck-root.service(8)
   Main PID: 554 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Feb 02 10:52:37 localhost systemd[1]: Starting File System Check on /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8...
Feb 02 10:52:37 localhost systemd-fsck[556]: /usr/sbin/fsck.xfs: XFS file system.
Feb 02 10:52:37 localhost systemd[1]: Finished File System Check on /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8.

● systemd-hostnamed.service - Hostname Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-hostnamed.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-hostnamed.service.d
             └─disable-privatedevices.conf
     Active: active (running) since Mon 2026-02-02 12:24:33 UTC; 17s ago
       Docs: man:systemd-hostnamed.service(8)
             man:hostname(5)
             man:machine-info(5)
             man:org.freedesktop.resolve1(5)
   Main PID: 291011 (systemd-hostnam)
         IO: 24.0K read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 2.7M (peak: 3.8M)
        CPU: 90ms
     CGroup: /system.slice/systemd-hostnamed.service
             └─291011 /usr/lib/systemd/systemd-hostnamed

Feb 02 12:24:33 compute-0 systemd[1]: Starting Hostname Service...
Feb 02 12:24:33 compute-0 systemd[1]: Started Hostname Service.

● systemd-hwdb-update.service - Rebuild Hardware Database
     Loaded: loaded (/usr/lib/systemd/system/systemd-hwdb-update.service; static)
     Active: active (exited) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
       Docs: man:hwdb(7)
             man:systemd-hwdb(8)
   Main PID: 690 (code=exited, status=0/SUCCESS)
        CPU: 678ms

Feb 02 10:52:38 localhost systemd[1]: Starting Rebuild Hardware Database...
Feb 02 10:52:39 localhost systemd[1]: Finished Rebuild Hardware Database.

○ systemd-initctl.service - initctl Compatibility Daemon
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-initctl.socket
       Docs: man:systemd-initctl.service(8)

● systemd-journal-catalog-update.service - Rebuild Journal Catalog
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-catalog-update.service; static)
     Active: active (exited) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 703 (code=exited, status=0/SUCCESS)
        CPU: 22ms

Feb 02 10:52:39 localhost systemd[1]: Starting Rebuild Journal Catalog...
Feb 02 10:52:39 localhost systemd[1]: Finished Rebuild Journal Catalog.

● systemd-journal-flush.service - Flush Journal to Persistent Storage
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-flush.service; static)
     Active: active (exited) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 691 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Feb 02 10:52:38 localhost systemd[1]: Starting Flush Journal to Persistent Storage...
Feb 02 10:52:38 localhost systemd[1]: Finished Flush Journal to Persistent Storage.

● systemd-journald.service - Journal Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.service; static)
     Active: active (running) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
TriggeredBy: ● systemd-journald-dev-log.socket
             ● systemd-journald.socket
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 680 (systemd-journal)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 107.5M (peak: 115.5M)
        CPU: 14.345s
     CGroup: /system.slice/systemd-journald.service
             └─680 /usr/lib/systemd/systemd-journald

Feb 02 10:52:38 localhost systemd-journald[680]: Journal started
Feb 02 10:52:38 localhost systemd-journald[680]: Runtime Journal (/run/log/journal/bf0bc0bb03de29b24cba1cc9599cf5d0) is 8.0M, max 153.6M, 145.6M free.
Feb 02 10:52:38 localhost systemd[1]: systemd-journald.service: Deactivated successfully.
Feb 02 10:52:38 localhost systemd-journald[680]: Runtime Journal (/run/log/journal/bf0bc0bb03de29b24cba1cc9599cf5d0) is 8.0M, max 153.6M, 145.6M free.
Feb 02 10:52:38 localhost systemd-journald[680]: Received client request to flush runtime journal.

● systemd-logind.service - User Login Management
     Loaded: loaded (/usr/lib/systemd/system/systemd-logind.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-logind.service.d
             └─10-grub2-logind-service.conf
     Active: active (running) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
       Docs: man:sd-login(3)
             man:systemd-logind.service(8)
             man:logind.conf(5)
             man:org.freedesktop.login1(5)
   Main PID: 786 (systemd-logind)
     Status: "Processing requests..."
         IO: 4.0K read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 7.2M (peak: 7.7M)
        CPU: 3.565s
     CGroup: /system.slice/systemd-logind.service
             └─786 /usr/lib/systemd/systemd-logind

Feb 02 12:14:40 compute-0 systemd-logind[786]: New session 51 of user zuul.
Feb 02 12:16:01 compute-0 systemd-logind[786]: Session 51 logged out. Waiting for processes to exit.
Feb 02 12:16:01 compute-0 systemd-logind[786]: Removed session 51.
Feb 02 12:16:02 compute-0 systemd-logind[786]: New session 52 of user zuul.
Feb 02 12:16:02 compute-0 systemd-logind[786]: Session 52 logged out. Waiting for processes to exit.
Feb 02 12:16:02 compute-0 systemd-logind[786]: Removed session 52.
Feb 02 12:16:02 compute-0 systemd-logind[786]: New session 53 of user zuul.
Feb 02 12:16:02 compute-0 systemd-logind[786]: Session 53 logged out. Waiting for processes to exit.
Feb 02 12:16:02 compute-0 systemd-logind[786]: Removed session 53.
Feb 02 12:24:05 compute-0 systemd-logind[786]: New session 54 of user zuul.

○ systemd-machine-id-commit.service - Commit a transient machine-id on disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-machine-id-commit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
       Docs: man:systemd-machine-id-commit.service(8)

Feb 02 10:52:39 localhost systemd[1]: Commit a transient machine-id on disk was skipped because of an unmet condition check (ConditionPathIsMountPoint=/etc/machine-id).

● systemd-machined.service - Virtual Machine and Container Registration Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-machined.service; static)
     Active: active (running) since Mon 2026-02-02 11:44:36 UTC; 40min ago
       Docs: man:systemd-machined.service(8)
             man:org.freedesktop.machine1(5)
   Main PID: 206973 (systemd-machine)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 1.5M (peak: 2.0M)
        CPU: 1.650s
     CGroup: /system.slice/systemd-machined.service
             └─206973 /usr/lib/systemd/systemd-machined

Feb 02 12:07:55 compute-0 systemd-machined[206973]: Machine qemu-24-instance-00000018 terminated.
Feb 02 12:08:01 compute-0 systemd-machined[206973]: Machine qemu-22-instance-00000016 terminated.
Feb 02 12:08:23 compute-0 systemd-machined[206973]: Machine qemu-25-instance-00000019 terminated.
Feb 02 12:08:26 compute-0 systemd-machined[206973]: Machine qemu-26-instance-0000001a terminated.
Unit systemd-networkd-wait-online.service could not be found.
Feb 02 12:09:12 compute-0 systemd-machined[206973]: New machine qemu-27-instance-0000001b.
Feb 02 12:09:35 compute-0 systemd-machined[206973]: Machine qemu-27-instance-0000001b terminated.
Feb 02 12:09:49 compute-0 systemd-machined[206973]: New machine qemu-28-instance-0000001c.
Feb 02 12:10:28 compute-0 systemd-machined[206973]: Machine qemu-28-instance-0000001c terminated.
Feb 02 12:12:26 compute-0 systemd-machined[206973]: New machine qemu-29-instance-0000001d.
Feb 02 12:13:32 compute-0 systemd-machined[206973]: Machine qemu-29-instance-0000001d terminated.

● systemd-modules-load.service - Load Kernel Modules
     Loaded: loaded (/usr/lib/systemd/system/systemd-modules-load.service; static)
     Active: active (exited) since Mon 2026-02-02 11:45:55 UTC; 38min ago
       Docs: man:systemd-modules-load.service(8)
             man:modules-load.d(5)
   Main PID: 221674 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Feb 02 11:45:55 compute-0 systemd[1]: Starting Load Kernel Modules...
Feb 02 11:45:55 compute-0 systemd[1]: Finished Load Kernel Modules.

● systemd-network-generator.service - Generate network units from Kernel command line
     Loaded: loaded (/usr/lib/systemd/system/systemd-network-generator.service; enabled; preset: enabled)
     Active: active (exited) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:systemd-network-generator.service(8)
   Main PID: 681 (code=exited, status=0/SUCCESS)
        CPU: 5ms

Feb 02 10:52:38 localhost systemd[1]: Finished Generate network units from Kernel command line.

○ systemd-pcrmachine.service - TPM2 PCR Machine ID Measurement
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrmachine.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:systemd-pcrmachine.service(8)

○ systemd-pcrphase-initrd.service - TPM2 PCR Barrier (initrd)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-initrd.service; static)
     Active: inactive (dead)
       Docs: man:systemd-pcrphase-initrd.service(8)

○ systemd-pcrphase-sysinit.service - TPM2 PCR Barrier (Initialization)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-sysinit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
       Docs: man:systemd-pcrphase-sysinit.service(8)

Feb 02 10:52:39 localhost systemd[1]: TPM2 PCR Barrier (Initialization) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pcrphase.service - TPM2 PCR Barrier (User)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 10:52:41 UTC; 1h 32min ago
       Docs: man:systemd-pcrphase.service(8)

Feb 02 10:52:41 np0005604943.novalocal systemd[1]: TPM2 PCR Barrier (User) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pstore.service - Platform Persistent Storage Archival
     Loaded: loaded (/usr/lib/systemd/system/systemd-pstore.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:systemd-pstore(8)

Feb 02 10:52:38 localhost systemd[1]: Platform Persistent Storage Archival was skipped because of an unmet condition check (ConditionDirectoryNotEmpty=/sys/fs/pstore).

● systemd-random-seed.service - Load/Save OS Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-random-seed.service; static)
     Active: active (exited) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:systemd-random-seed.service(8)
             man:random(4)
   Main PID: 692 (code=exited, status=0/SUCCESS)
        CPU: 6ms

Feb 02 10:52:38 localhost systemd[1]: Starting Load/Save OS Random Seed...
Unit systemd-timesyncd.service could not be found.
Feb 02 10:52:38 localhost systemd[1]: Finished Load/Save OS Random Seed.

● systemd-remount-fs.service - Remount Root and Kernel File Systems
     Loaded: loaded (/usr/lib/systemd/system/systemd-remount-fs.service; enabled-runtime; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:systemd-remount-fs.service(8)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
   Main PID: 682 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Feb 02 10:52:38 localhost systemd[1]: Finished Remount Root and Kernel File Systems.

○ systemd-repart.service - Repartition Root Disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-repart.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:systemd-repart.service(8)

○ systemd-rfkill.service - Load/Save RF Kill Switch Status
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-rfkill.socket
       Docs: man:systemd-rfkill.service(8)

● systemd-sysctl.service - Apply Kernel Variables
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysctl.service; static)
     Active: active (exited) since Mon 2026-02-02 11:26:12 UTC; 58min ago
       Docs: man:systemd-sysctl.service(8)
             man:sysctl.d(5)
   Main PID: 45070 (code=exited, status=0/SUCCESS)
        CPU: 19ms

Feb 02 11:26:12 compute-0 systemd[1]: Starting Apply Kernel Variables...
Feb 02 11:26:12 compute-0 systemd[1]: Finished Apply Kernel Variables.

○ systemd-sysext.service - Merge System Extension Images into /usr/ and /opt/
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysext.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd-sysext.service(8)

● systemd-sysusers.service - Create System Users
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysusers.service; static)
     Active: active (exited) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:sysusers.d(5)
             man:systemd-sysusers.service(8)
   Main PID: 693 (code=exited, status=0/SUCCESS)
        CPU: 20ms

Feb 02 10:52:38 localhost systemd[1]: Starting Create System Users...
Feb 02 10:52:38 localhost systemd[1]: Finished Create System Users.

○ systemd-tmpfiles-clean.service - Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.service; static)
     Active: inactive (dead) since Mon 2026-02-02 11:07:53 UTC; 1h 16min ago
TriggeredBy: ● systemd-tmpfiles-clean.timer
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 30011 (code=exited, status=0/SUCCESS)
        CPU: 36ms

Feb 02 11:07:52 compute-0 systemd[1]: Starting Cleanup of Temporary Directories...
Feb 02 11:07:53 compute-0 systemd[1]: systemd-tmpfiles-clean.service: Deactivated successfully.
Feb 02 11:07:53 compute-0 systemd[1]: Finished Cleanup of Temporary Directories.

● systemd-tmpfiles-setup-dev.service - Create Static Device Nodes in /dev
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup-dev.service; static)
     Active: active (exited) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 695 (code=exited, status=0/SUCCESS)
        CPU: 35ms

Feb 02 10:52:38 localhost systemd[1]: Starting Create Static Device Nodes in /dev...
Feb 02 10:52:39 localhost systemd[1]: Finished Create Static Device Nodes in /dev.

● systemd-tmpfiles-setup.service - Create Volatile Files and Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup.service; static)
     Active: active (exited) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 699 (code=exited, status=0/SUCCESS)
        CPU: 72ms

Feb 02 10:52:39 localhost systemd[1]: Starting Create Volatile Files and Directories...
Feb 02 10:52:39 localhost systemd[1]: Finished Create Volatile Files and Directories.
Unit systemd-tmpfiles.service could not be found.

● systemd-udev-settle.service - Wait for udev To Complete Device Initialization
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-settle.service; static)
     Active: active (exited) since Mon 2026-02-02 11:45:51 UTC; 39min ago
       Docs: man:systemd-udev-settle.service(8)
   Main PID: 220765 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Feb 02 11:45:51 compute-0 systemd[1]: Starting Wait for udev To Complete Device Initialization...
Feb 02 11:45:51 compute-0 udevadm[220765]: systemd-udev-settle.service is deprecated. Please fix multipathd.service not to pull it in.
Feb 02 11:45:51 compute-0 systemd[1]: Finished Wait for udev To Complete Device Initialization.

● systemd-udev-trigger.service - Coldplug All udev Devices
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-trigger.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-udev-trigger.service.d
             └─systemd-udev-trigger-no-reload.conf
     Active: active (exited) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:udev(7)
             man:systemd-udevd.service(8)
   Main PID: 686 (code=exited, status=0/SUCCESS)
        CPU: 59ms

Feb 02 10:52:38 localhost systemd[1]: Finished Coldplug All udev Devices.

● systemd-udevd.service - Rule-based Manager for Device Events and Files
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd.service; static)
     Active: active (running) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
TriggeredBy: ● systemd-udevd-kernel.socket
             ● systemd-udevd-control.socket
       Docs: man:systemd-udevd.service(8)
             man:udev(7)
   Main PID: 732 (systemd-udevd)
     Status: "Processing with 32 children at max"
         IO: 206.5M read, 103.8M written
      Tasks: 1
     Memory: 58.8M (peak: 99.4M)
        CPU: 15.640s
     CGroup: /system.slice/systemd-udevd.service
             └─udev
               └─732 /usr/lib/systemd/systemd-udevd

Feb 02 12:24:10 compute-0 lvm[288253]: VG ceph_vg0 finished
Feb 02 12:24:10 compute-0 lvm[288254]: VG ceph_vg1 finished
Feb 02 12:24:10 compute-0 lvm[288256]: PV /dev/loop5 online, VG ceph_vg2 is complete.
Feb 02 12:24:10 compute-0 lvm[288256]: VG ceph_vg2 finished
Feb 02 12:24:14 compute-0 lvm[288717]: PV /dev/loop5 online, VG ceph_vg2 is complete.
Feb 02 12:24:14 compute-0 lvm[288717]: VG ceph_vg2 finished
Feb 02 12:24:14 compute-0 lvm[288730]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Feb 02 12:24:14 compute-0 lvm[288730]: VG ceph_vg0 finished
Feb 02 12:24:14 compute-0 lvm[288733]: PV /dev/loop4 online, VG ceph_vg1 is complete.
Feb 02 12:24:14 compute-0 lvm[288733]: VG ceph_vg1 finished

● systemd-update-done.service - Update is Completed
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-done.service; static)
     Active: active (exited) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
       Docs: man:systemd-update-done.service(8)
   Main PID: 733 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Feb 02 10:52:39 localhost systemd[1]: Starting Update is Completed...
Feb 02 10:52:39 localhost systemd[1]: Finished Update is Completed.

○ systemd-update-utmp-runlevel.service - Record Runlevel Change in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp-runlevel.service; static)
     Active: inactive (dead) since Mon 2026-02-02 10:52:44 UTC; 1h 32min ago
       Docs: man:systemd-update-utmp-runlevel.service(8)
             man:utmp(5)
   Main PID: 1021 (code=exited, status=0/SUCCESS)
        CPU: 5ms

Feb 02 10:52:44 np0005604943.novalocal systemd[1]: Starting Record Runlevel Change in UTMP...
Feb 02 10:52:44 np0005604943.novalocal systemd[1]: systemd-update-utmp-runlevel.service: Deactivated successfully.
Feb 02 10:52:44 np0005604943.novalocal systemd[1]: Finished Record Runlevel Change in UTMP.

● systemd-update-utmp.service - Record System Boot/Shutdown in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp.service; static)
     Active: active (exited) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
       Docs: man:systemd-update-utmp.service(8)
Unit tlp.service could not be found.
             man:utmp(5)
   Main PID: 731 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Feb 02 10:52:39 localhost systemd[1]: Starting Record System Boot/Shutdown in UTMP...
Feb 02 10:52:39 localhost systemd[1]: Finished Record System Boot/Shutdown in UTMP.

● systemd-user-sessions.service - Permit User Sessions
     Loaded: loaded (/usr/lib/systemd/system/systemd-user-sessions.service; static)
     Active: active (exited) since Mon 2026-02-02 10:52:44 UTC; 1h 32min ago
       Docs: man:systemd-user-sessions.service(8)
   Main PID: 1011 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Feb 02 10:52:44 np0005604943.novalocal systemd[1]: Starting Permit User Sessions...
Feb 02 10:52:44 np0005604943.novalocal systemd[1]: Finished Permit User Sessions.

○ systemd-vconsole-setup.service - Setup Virtual Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-vconsole-setup.service; static)
     Active: inactive (dead) since Mon 2026-02-02 10:52:37 UTC; 1h 32min ago
   Duration: 1.738s
       Docs: man:systemd-vconsole-setup.service(8)
             man:vconsole.conf(5)
   Main PID: 314 (code=exited, status=0/SUCCESS)
        CPU: 191ms

Feb 02 10:52:36 localhost systemd[1]: Finished Setup Virtual Console.
Feb 02 10:52:37 localhost systemd[1]: systemd-vconsole-setup.service: Deactivated successfully.
Feb 02 10:52:37 localhost systemd[1]: Stopped Setup Virtual Console.
Notice: journal has been rotated since unit was started, output may be incomplete.

● tuned.service - Dynamic System Tuning Daemon
     Loaded: loaded (/usr/lib/systemd/system/tuned.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 11:35:38 UTC; 49min ago
       Docs: man:tuned(8)
             man:tuned.conf(5)
             man:tuned-adm(8)
   Main PID: 105965 (tuned)
         IO: 36.0K read, 0B written
      Tasks: 4 (limit: 48560)
     Memory: 13.8M (peak: 16.0M)
        CPU: 1.320s
     CGroup: /system.slice/tuned.service
             └─105965 /usr/bin/python3 -Es /usr/sbin/tuned -l -P

Feb 02 11:35:38 compute-0 systemd[1]: Starting Dynamic System Tuning Daemon...
Feb 02 11:35:38 compute-0 systemd[1]: Started Dynamic System Tuning Daemon.

○ unbound-anchor.service - update of the root trust anchor for DNSSEC validation in unbound
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.service; static)
     Active: inactive (dead)
TriggeredBy: ● unbound-anchor.timer
       Docs: man:unbound-anchor(8)

● user-runtime-dir@1000.service - User Runtime Directory /run/user/1000
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Mon 2026-02-02 10:52:58 UTC; 1h 31min ago
       Docs: man:user@.service(5)
   Main PID: 4308 (code=exited, status=0/SUCCESS)
        CPU: 15ms

Feb 02 10:52:58 np0005604943.novalocal systemd[1]: Starting User Runtime Directory /run/user/1000...
Feb 02 10:52:58 np0005604943.novalocal systemd[1]: Finished User Runtime Directory /run/user/1000.

● user-runtime-dir@42477.service - User Runtime Directory /run/user/42477
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Mon 2026-02-02 11:31:15 UTC; 53min ago
       Docs: man:user@.service(5)
   Main PID: 76639 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Feb 02 11:31:15 compute-0 systemd[1]: Starting User Runtime Directory /run/user/42477...
Feb 02 11:31:15 compute-0 systemd[1]: Finished User Runtime Directory /run/user/42477.

● user@1000.service - User Manager for UID 1000
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Mon 2026-02-02 10:52:58 UTC; 1h 31min ago
       Docs: man:user@.service(5)
   Main PID: 4309 (systemd)
     Status: "Ready."
         IO: 676.0K read, 8.0K written
      Tasks: 5
     Memory: 9.2M (peak: 16.2M)
        CPU: 4.757s
     CGroup: /user.slice/user-1000.slice/user@1000.service
             ├─app.slice
             │ └─dbus-broker.service
             │   ├─13969 /usr/bin/dbus-broker-launch --scope user
             │   └─13970 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             ├─init.scope
             │ ├─4309 /usr/lib/systemd/systemd --user
             │ └─4311 "(sd-pam)"
             └─user.slice
               └─podman-pause-2465a80e.scope
                 └─13952 catatonit -P

Feb 02 11:04:36 np0005604943.novalocal dbus-broker-launch[13969]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +31: Eavesdropping is deprecated and ignored
Feb 02 11:04:36 np0005604943.novalocal dbus-broker-launch[13969]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +33: Eavesdropping is deprecated and ignored
Feb 02 11:04:36 np0005604943.novalocal systemd[4309]: Started D-Bus User Message Bus.
Feb 02 11:04:36 np0005604943.novalocal dbus-broker-lau[13969]: Ready
Feb 02 11:04:36 np0005604943.novalocal systemd[4309]: selinux: avc:  op=load_policy lsm=selinux seqno=4 res=1
Feb 02 11:04:36 np0005604943.novalocal systemd[4309]: Created slice Slice /user.
Feb 02 11:04:36 np0005604943.novalocal systemd[4309]: podman-13947.scope: unit configures an IP firewall, but not running as root.
Feb 02 11:04:36 np0005604943.novalocal systemd[4309]: (This warning is only shown for the first unit using IP firewalling.)
Feb 02 11:04:37 np0005604943.novalocal systemd[4309]: Started podman-13947.scope.
Feb 02 11:04:37 np0005604943.novalocal systemd[4309]: Started podman-pause-2465a80e.scope.

● user@42477.service - User Manager for UID 42477
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Mon 2026-02-02 11:31:16 UTC; 53min ago
       Docs: man:user@.service(5)
   Main PID: 76640 (systemd)
     Status: "Ready."
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 7.5M (peak: 11.2M)
        CPU: 3.206s
     CGroup: /user.slice/user-42477.slice/user@42477.service
             └─init.scope
               ├─76640 /usr/lib/systemd/systemd --user
               └─76642 "(sd-pam)"

Feb 02 11:31:16 compute-0 systemd[76640]: Reached target Sockets.
Feb 02 11:31:16 compute-0 systemd[76640]: Reached target Basic System.
Feb 02 11:31:16 compute-0 systemd[76640]: Reached target Main User Target.
Feb 02 11:31:16 compute-0 systemd[76640]: Startup finished in 108ms.
Feb 02 11:31:16 compute-0 systemd[1]: Started User Manager for UID 42477.
Feb 02 11:33:40 compute-0 systemd[76640]: Starting Mark boot as successful...
Feb 02 11:33:40 compute-0 systemd[76640]: Finished Mark boot as successful.
Feb 02 11:36:16 compute-0 systemd[76640]: Created slice User Background Tasks Slice.
Feb 02 11:36:16 compute-0 systemd[76640]: Starting Cleanup of User's Temporary Files and Directories...
Feb 02 11:36:16 compute-0 systemd[76640]: Finished Cleanup of User's Temporary Files and Directories.

○ virtinterfaced.service - libvirt interface daemon
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtinterfaced-admin.socket
             ○ virtinterfaced.socket
             ○ virtinterfaced-ro.socket
       Docs: man:virtinterfaced(8)
             https://libvirt.org/

○ virtlockd.service - libvirt locking daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ● virtlockd.socket
             ○ virtlockd-admin.socket
       Docs: man:virtlockd(8)
             https://libvirt.org/

● virtlogd.service - libvirt logging daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 11:44:34 UTC; 40min ago
TriggeredBy: ● virtlogd-admin.socket
             ● virtlogd.socket
       Docs: man:virtlogd(8)
             https://libvirt.org/
   Main PID: 206344 (virtlogd)
         IO: 644.0K read, 2.5M written
      Tasks: 1 (limit: 48560)
     Memory: 3.6M (peak: 4.2M)
        CPU: 30.573s
     CGroup: /system.slice/virtlogd.service
             └─206344 /usr/sbin/virtlogd

Feb 02 11:44:33 compute-0 systemd[1]: Starting libvirt logging daemon...
Feb 02 11:44:34 compute-0 systemd[1]: Started libvirt logging daemon.

○ virtnetworkd.service - libvirt network daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnetworkd.socket
             ○ virtnetworkd-ro.socket
             ○ virtnetworkd-admin.socket
       Docs: man:virtnetworkd(8)
             https://libvirt.org/

● virtnodedevd.service - libvirt nodedev daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 11:47:22 UTC; 37min ago
TriggeredBy: ● virtnodedevd-ro.socket
             ● virtnodedevd-admin.socket
             ● virtnodedevd.socket
       Docs: man:virtnodedevd(8)
             https://libvirt.org/
   Main PID: 239207 (virtnodedevd)
         IO: 2.8M read, 0B written
      Tasks: 20 (limit: 48560)
     Memory: 8.7M (peak: 10.2M)
        CPU: 2.894s
     CGroup: /system.slice/virtnodedevd.service
             └─239207 /usr/sbin/virtnodedevd --timeout 120

Feb 02 11:47:22 compute-0 systemd[1]: Starting libvirt nodedev daemon...
Feb 02 11:47:22 compute-0 systemd[1]: Started libvirt nodedev daemon.

○ virtnwfilterd.service - libvirt nwfilter daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnwfilterd.socket
             ○ virtnwfilterd-admin.socket
             ○ virtnwfilterd-ro.socket
       Docs: man:virtnwfilterd(8)
             https://libvirt.org/

○ virtproxyd.service - libvirt proxy daemon
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.service; enabled; preset: disabled)
     Active: inactive (dead) since Mon 2026-02-02 11:46:35 UTC; 38min ago
   Duration: 2min 34ms
TriggeredBy: ● virtproxyd.socket
             ● virtproxyd-admin.socket
             ● virtproxyd-tls.socket
             ● virtproxyd-ro.socket
       Docs: man:virtproxyd(8)
             https://libvirt.org/
   Main PID: 206762 (code=exited, status=0/SUCCESS)
        CPU: 41ms

Feb 02 11:44:35 compute-0 systemd[1]: Starting libvirt proxy daemon...
Feb 02 11:44:35 compute-0 systemd[1]: Started libvirt proxy daemon.
Feb 02 11:46:35 compute-0 systemd[1]: virtproxyd.service: Deactivated successfully.

● virtqemud.service - libvirt QEMU daemon
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 11:47:15 UTC; 37min ago
TriggeredBy: ● virtqemud.socket
             ● virtqemud-admin.socket
             ● virtqemud-ro.socket
       Docs: man:virtqemud(8)
             https://libvirt.org/
   Main PID: 238654 (virtqemud)
         IO: 18.9M read, 1.2M written
      Tasks: 19 (limit: 32768)
     Memory: 42.1M (peak: 60.6M)
        CPU: 7.618s
     CGroup: /system.slice/virtqemud.service
             └─238654 /usr/sbin/virtqemud --timeout 120

Feb 02 11:47:16 compute-0 virtqemud[238654]: hostname: compute-0
Feb 02 11:47:16 compute-0 virtqemud[238654]: End of file while reading data: Input/output error
Feb 02 12:00:13 compute-0 virtqemud[238654]: End of file while reading data: Input/output error
Feb 02 12:14:50 compute-0 virtqemud[238654]: Failed to connect socket to '/var/run/libvirt/virtnetworkd-sock-ro': No such file or directory
Feb 02 12:14:50 compute-0 virtqemud[238654]: Failed to connect socket to '/var/run/libvirt/virtnwfilterd-sock-ro': No such file or directory
Feb 02 12:14:50 compute-0 virtqemud[238654]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory
Feb 02 12:15:30 compute-0 virtqemud[238654]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory
Unit ypbind.service could not be found.
Unit yppasswdd.service could not be found.
Unit ypserv.service could not be found.
Unit ypxfrd.service could not be found.
Feb 02 12:24:13 compute-0 virtqemud[238654]: Failed to connect socket to '/var/run/libvirt/virtnetworkd-sock-ro': No such file or directory
Feb 02 12:24:13 compute-0 virtqemud[238654]: Failed to connect socket to '/var/run/libvirt/virtnwfilterd-sock-ro': No such file or directory
Feb 02 12:24:13 compute-0 virtqemud[238654]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory

● virtsecretd.service - libvirt secret daemon
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 11:55:25 UTC; 29min ago
TriggeredBy: ● virtsecretd.socket
             ● virtsecretd-ro.socket
             ● virtsecretd-admin.socket
       Docs: man:virtsecretd(8)
             https://libvirt.org/
   Main PID: 245219 (virtsecretd)
         IO: 8.0K read, 157.5K written
      Tasks: 18 (limit: 48560)
     Memory: 4.1M (peak: 4.9M)
        CPU: 417ms
     CGroup: /system.slice/virtsecretd.service
             └─245219 /usr/sbin/virtsecretd --timeout 120

Feb 02 11:55:25 compute-0 systemd[1]: Starting libvirt secret daemon...
Feb 02 11:55:25 compute-0 systemd[1]: Started libvirt secret daemon.

○ virtstoraged.service - libvirt storage daemon
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtstoraged-admin.socket
             ○ virtstoraged-ro.socket
             ○ virtstoraged.socket
       Docs: man:virtstoraged(8)
             https://libvirt.org/

● -.slice - Root Slice
     Loaded: loaded
     Active: active since Mon 2026-02-02 10:52:35 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:35 UTC; 1h 32min ago
       Docs: man:systemd.special(7)
      Tasks: 1433
     Memory: 3.6G
        CPU: 50min 57.510s
     CGroup: /
             ├─293712 turbostat --debug sleep 10
             ├─293719 sleep 10
             ├─init.scope
             │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
             ├─machine.slice
             │ ├─libpod-dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059.scope
             │ │ └─container
             │ │   ├─145058 dumb-init --single-child -- kolla_start
             │ │   └─145061 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
             │ ├─libpod-e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07.scope
             │ │ └─container
             │ │   ├─238885 dumb-init --single-child -- kolla_start
             │ │   ├─238887 /usr/bin/python3 /usr/bin/nova-compute
             │ │   ├─245133 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmp8j6j0ldg/privsep.sock
             │ │   └─249642 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmpney6i09_/privsep.sock
             │ └─libpod-f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9.scope
             │   └─container
             │     ├─155008 dumb-init --single-child -- kolla_start
             │     ├─155011 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │     ├─155570 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │     ├─155575 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpsq_kuak1/privsep.sock
             │     ├─245329 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpk6kanhyo/privsep.sock
             │     └─245414 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpuet8pyad/privsep.sock
             ├─system.slice
             │ ├─NetworkManager.service
             │ │ └─49093 /usr/sbin/NetworkManager --no-daemon
             │ ├─auditd.service
             │ │ ├─704 /sbin/auditd
             │ │ └─706 /usr/sbin/sedispatch
             │ ├─chronyd.service
             │ │ └─58660 /usr/sbin/chronyd -F 2
             │ ├─crond.service
             │ │ ├─1013 /usr/sbin/crond -n
             │ │ └─7490 /usr/sbin/anacron -s
             │ ├─dbus-broker.service
             │ │ ├─768 /usr/bin/dbus-broker-launch --scope system --audit
             │ │ └─775 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             │ ├─edpm_nova_compute.service
             │ │ └─238883 /usr/bin/conmon --api-version 1 -c e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07 -u e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07/userdata -p /run/containers/storage/overlay-containers/e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07
             │ ├─edpm_ovn_controller.service
             │ │ └─145056 /usr/bin/conmon --api-version 1 -c dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059 -u dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059/userdata -p /run/containers/storage/overlay-containers/dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059
             │ ├─edpm_ovn_metadata_agent.service
             │ │ └─155006 /usr/bin/conmon --api-version 1 -c f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9 -u f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9/userdata -p /run/containers/storage/overlay-containers/f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9
             │ ├─gssproxy.service
             │ │ └─876 /usr/sbin/gssproxy -D
             │ ├─irqbalance.service
             │ │ └─782 /usr/sbin/irqbalance
             │ ├─iscsid.service
             │ │ └─223533 /usr/sbin/iscsid -f
             │ ├─multipathd.service
             │ │ └─223815 /sbin/multipathd -d -s
             │ ├─ovs-vswitchd.service
             │ │ └─47398 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             │ ├─ovsdb-server.service
             │ │ └─47317 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             │ ├─polkit.service
             │ │ └─43582 /usr/lib/polkit-1/polkitd --no-debug
             │ ├─rpcbind.service
             │ │ └─702 /usr/bin/rpcbind -w -f
             │ ├─rsyslog.service
             │ │ └─1009 /usr/sbin/rsyslogd -n
             │ ├─sshd.service
             │ │ └─181254 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             │ ├─system-ceph\x2d4548a36b\x2d7cdc\x2d5e3e\x2da814\x2d4e1571be1fae.slice
             │ │ ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@crash.compute-0.service
             │ │ │ ├─libpod-payload-61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0
             │ │ │ │ ├─80410 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ │ └─80412 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ └─runtime
             │ │ │   └─80408 /usr/bin/conmon --api-version 1 -c 61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0 -u 61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0/userdata -p /run/containers/storage/overlay-containers/61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0
             │ │ ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mds.cephfs.compute-0.mldrue.service
             │ │ │ ├─libpod-payload-2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409
             │ │ │ │ ├─95496 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.mldrue -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─95505 /usr/bin/ceph-mds -n mds.cephfs.compute-0.mldrue -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─95494 /usr/bin/conmon --api-version 1 -c 2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409 -u 2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409/userdata -p /run/containers/storage/overlay-containers/2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-mds-cephfs-compute-0-mldrue --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mds.cephfs.compute-0.mldrue.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409
             │ │ ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mgr.compute-0.twcemg.service
             │ │ │ ├─libpod-payload-e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3
             │ │ │ │ ├─75556 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.twcemg -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─75558 /usr/bin/ceph-mgr -n mgr.compute-0.twcemg -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─75554 /usr/bin/conmon --api-version 1 -c e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3 -u e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3/userdata -p /run/containers/storage/overlay-containers/e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-mgr-compute-0-twcemg --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mgr.compute-0.twcemg.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3
             │ │ ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mon.compute-0.service
             │ │ │ ├─libpod-payload-fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04
             │ │ │ │ ├─75269 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ │ └─75271 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─75267 /usr/bin/conmon --api-version 1 -c fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04 -u fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04/userdata -p /run/containers/storage/overlay-containers/fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04
             │ │ ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.0.service
             │ │ │ ├─libpod-payload-409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962
             │ │ │ │ ├─86142 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ │ └─86144 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ └─runtime
             │ │ │   └─86140 /usr/bin/conmon --api-version 1 -c 409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962 -u 409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962/userdata -p /run/containers/storage/overlay-containers/409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962
             │ │ ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.1.service
             │ │ │ ├─libpod-payload-8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0
             │ │ │ │ ├─87190 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ │ └─87192 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ └─runtime
             │ │ │   └─87188 /usr/bin/conmon --api-version 1 -c 8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0 -u 8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0/userdata -p /run/containers/storage/overlay-containers/8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0
             │ │ ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.2.service
             │ │ │ ├─libpod-payload-599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd
             │ │ │ │ ├─88234 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ │ └─88236 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ └─runtime
             │ │ │   └─88232 /usr/bin/conmon --api-version 1 -c 599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd -u 599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd/userdata -p /run/containers/storage/overlay-containers/599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd
             │ │ └─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@rgw.rgw.compute-0.ctqttb.service
             │ │   ├─libpod-payload-e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90
             │ │   │ ├─94941 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.ctqttb -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
             │ │   │ └─94979 /usr/bin/radosgw -n client.rgw.rgw.compute-0.ctqttb -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
             │ │   └─runtime
             │ │     └─94939 /usr/bin/conmon --api-version 1 -c e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90 -u e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90/userdata -p /run/containers/storage/overlay-containers/e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-rgw-rgw-compute-0-ctqttb --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@rgw.rgw.compute-0.ctqttb.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90
             │ ├─system-getty.slice
             │ │ └─getty@tty1.service
             │ │   └─1014 /sbin/agetty -o "-p -- \\u" --noclear - linux
             │ ├─system-serial\x2dgetty.slice
             │ │ └─serial-getty@ttyS0.service
             │ │   └─1015 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             │ ├─systemd-hostnamed.service
             │ │ └─291011 /usr/lib/systemd/systemd-hostnamed
             │ ├─systemd-journald.service
             │ │ └─680 /usr/lib/systemd/systemd-journald
             │ ├─systemd-logind.service
             │ │ └─786 /usr/lib/systemd/systemd-logind
             │ ├─systemd-machined.service
             │ │ └─206973 /usr/lib/systemd/systemd-machined
             │ ├─systemd-udevd.service
             │ │ └─udev
             │ │   └─732 /usr/lib/systemd/systemd-udevd
             │ ├─tuned.service
             │ │ └─105965 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             │ ├─virtlogd.service
             │ │ └─206344 /usr/sbin/virtlogd
             │ ├─virtnodedevd.service
             │ │ └─239207 /usr/sbin/virtnodedevd --timeout 120
             │ ├─virtqemud.service
             │ │ └─238654 /usr/sbin/virtqemud --timeout 120
             │ └─virtsecretd.service
             │   └─245219 /usr/sbin/virtsecretd --timeout 120
             └─user.slice
               ├─user-1000.slice
               │ ├─session-1.scope
               │ │ └─4519 /usr/bin/python3
               │ ├─session-54.scope
               │ │ ├─287512 "sshd-session: zuul [priv]"
               │ │ ├─287564 "sshd-session: zuul@notty"
               │ │ ├─287565 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
               │ │ ├─287589 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
               │ │ ├─293711 timeout 15s turbostat --debug sleep 10
               │ │ ├─294058 timeout 300s systemctl status --all
               │ │ ├─294059 systemctl status --all
               │ │ ├─294087 timeout 300s ceph osd df tree --format json-pretty
               │ │ ├─294088 /usr/bin/python3 -s /usr/bin/ceph osd df tree --format json-pretty
               │ │ ├─294108 timeout 300s semanage boolean -l
               │ │ └─294109 /usr/bin/python3 -EsI /usr/sbin/semanage boolean -l
               │ └─user@1000.service
               │   ├─app.slice
               │   │ └─dbus-broker.service
               │   │   ├─13969 /usr/bin/dbus-broker-launch --scope user
               │   │   └─13970 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               │   ├─init.scope
               │   │ ├─4309 /usr/lib/systemd/systemd --user
               │   │ └─4311 "(sd-pam)"
               │   └─user.slice
               │     └─podman-pause-2465a80e.scope
               │       └─13952 catatonit -P
               └─user-42477.slice
                 ├─session-20.scope
                 │ ├─76636 "sshd-session: ceph-admin [priv]"
                 │ └─76657 "sshd-session: ceph-admin"
                 ├─session-22.scope
                 │ ├─76656 "sshd-session: ceph-admin [priv]"
                 │ └─76660 "sshd-session: ceph-admin@notty"
                 ├─session-23.scope
                 │ ├─76686 "sshd-session: ceph-admin [priv]"
                 │ └─76689 "sshd-session: ceph-admin@notty"
                 ├─session-24.scope
                 │ ├─76715 "sshd-session: ceph-admin [priv]"
                 │ └─76718 "sshd-session: ceph-admin@notty"
                 ├─session-25.scope
                 │ ├─76744 "sshd-session: ceph-admin [priv]"
                 │ └─76747 "sshd-session: ceph-admin@notty"
                 ├─session-26.scope
                 │ ├─76773 "sshd-session: ceph-admin [priv]"
                 │ └─76776 "sshd-session: ceph-admin@notty"
                 ├─session-27.scope
                 │ ├─76802 "sshd-session: ceph-admin [priv]"
                 │ └─76805 "sshd-session: ceph-admin@notty"
                 ├─session-28.scope
                 │ ├─76831 "sshd-session: ceph-admin [priv]"
                 │ └─76834 "sshd-session: ceph-admin@notty"
                 ├─session-29.scope
                 │ ├─76860 "sshd-session: ceph-admin [priv]"
                 │ └─76863 "sshd-session: ceph-admin@notty"
                 ├─session-30.scope
                 │ ├─76889 "sshd-session: ceph-admin [priv]"
                 │ └─76892 "sshd-session: ceph-admin@notty"
                 ├─session-31.scope
                 │ ├─76916 "sshd-session: ceph-admin [priv]"
                 │ └─76919 "sshd-session: ceph-admin@notty"
                 ├─session-32.scope
                 │ ├─76945 "sshd-session: ceph-admin [priv]"
                 │ └─76948 "sshd-session: ceph-admin@notty"
                 └─user@42477.service
                   └─init.scope
                     ├─76640 /usr/lib/systemd/systemd --user
                     └─76642 "(sd-pam)"

Feb 02 12:24:09 compute-0 systemd[1]: var-lib-containers-storage-overlay-5ed0001bc2e2cb2e19179ce2f6bcc066a8d965c602ba81a457e115e8cbd3b0cf-merged.mount: Deactivated successfully.
Feb 02 12:24:09 compute-0 systemd[1]: libpod-conmon-1ba1ee714d09a353f5a65858421b1cae55451ab980161c30b840ddbda2570181.scope: Deactivated successfully.
Feb 02 12:24:10 compute-0 systemd[1]: Started libpod-conmon-9f68c28eb02450c3fcbb3c231eb7d9458d1ce4bca9b4288ef57689472ceb0ddf.scope.
Feb 02 12:24:10 compute-0 systemd[1]: Started libcrun container.
Feb 02 12:24:11 compute-0 systemd[1]: libpod-9f68c28eb02450c3fcbb3c231eb7d9458d1ce4bca9b4288ef57689472ceb0ddf.scope: Deactivated successfully.
Feb 02 12:24:11 compute-0 systemd[1]: libpod-9f68c28eb02450c3fcbb3c231eb7d9458d1ce4bca9b4288ef57689472ceb0ddf.scope: Consumed 1.354s CPU time.
Feb 02 12:24:11 compute-0 systemd[1]: var-lib-containers-storage-overlay-e4cc7b5666d7e0a93bf541eecfb5edb4790b8e5813fc7478a8951e2f7b50513c-merged.mount: Deactivated successfully.
Feb 02 12:24:11 compute-0 systemd[1]: libpod-conmon-9f68c28eb02450c3fcbb3c231eb7d9458d1ce4bca9b4288ef57689472ceb0ddf.scope: Deactivated successfully.
Feb 02 12:24:33 compute-0 systemd[1]: Starting Hostname Service...
Feb 02 12:24:33 compute-0 systemd[1]: Started Hostname Service.

● machine.slice - Virtual Machine and Container Slice
     Loaded: loaded (/usr/lib/systemd/system/machine.slice; static)
     Active: active since Mon 2026-02-02 11:30:45 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:45 UTC; 54min ago
       Docs: man:systemd.special(7)
         IO: 999.7M read, 88.4M written
      Tasks: 44
     Memory: 968.0M (peak: 1.6G)
        CPU: 12min 14.523s
     CGroup: /machine.slice
             ├─libpod-dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059.scope
             │ └─container
             │   ├─145058 dumb-init --single-child -- kolla_start
             │   └─145061 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
             ├─libpod-e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07.scope
             │ └─container
             │   ├─238885 dumb-init --single-child -- kolla_start
             │   ├─238887 /usr/bin/python3 /usr/bin/nova-compute
             │   ├─245133 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmp8j6j0ldg/privsep.sock
             │   └─249642 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmpney6i09_/privsep.sock
             └─libpod-f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9.scope
               └─container
                 ├─155008 dumb-init --single-child -- kolla_start
                 ├─155011 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
                 ├─155570 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
                 ├─155575 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpsq_kuak1/privsep.sock
                 ├─245329 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpk6kanhyo/privsep.sock
                 └─245414 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpuet8pyad/privsep.sock

Feb 02 12:24:09 compute-0 zealous_shaw[287983]:                 "ceph.vdo": "0",
Feb 02 12:24:09 compute-0 zealous_shaw[287983]:                 "ceph.with_tpm": "0"
Feb 02 12:24:09 compute-0 zealous_shaw[287983]:             },
Feb 02 12:24:09 compute-0 zealous_shaw[287983]:             "type": "block",
Feb 02 12:24:09 compute-0 zealous_shaw[287983]:             "vg_name": "ceph_vg2"
Feb 02 12:24:09 compute-0 zealous_shaw[287983]:         }
Feb 02 12:24:09 compute-0 zealous_shaw[287983]:     ]
Feb 02 12:24:09 compute-0 zealous_shaw[287983]: }
Feb 02 12:24:09 compute-0 inspiring_lewin[288110]: 167 167
Feb 02 12:24:10 compute-0 modest_banach[288169]: {}

● system-ceph\x2d4548a36b\x2d7cdc\x2d5e3e\x2da814\x2d4e1571be1fae.slice - Slice /system/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae
     Loaded: loaded
     Active: active since Mon 2026-02-02 11:30:49 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:49 UTC; 54min ago
         IO: 1.6G read, 25.0G written
      Tasks: 1002
     Memory: 3.3G (peak: 4.5G)
        CPU: 4min 34.123s
     CGroup: /system.slice/system-ceph\x2d4548a36b\x2d7cdc\x2d5e3e\x2da814\x2d4e1571be1fae.slice
             ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@crash.compute-0.service
             │ ├─libpod-payload-61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0
             │ │ ├─80410 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ └─80412 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ └─runtime
             │   └─80408 /usr/bin/conmon --api-version 1 -c 61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0 -u 61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0/userdata -p /run/containers/storage/overlay-containers/61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0
             ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mds.cephfs.compute-0.mldrue.service
             │ ├─libpod-payload-2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409
             │ │ ├─95496 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.mldrue -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─95505 /usr/bin/ceph-mds -n mds.cephfs.compute-0.mldrue -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─95494 /usr/bin/conmon --api-version 1 -c 2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409 -u 2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409/userdata -p /run/containers/storage/overlay-containers/2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-mds-cephfs-compute-0-mldrue --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mds.cephfs.compute-0.mldrue.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409
             ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mgr.compute-0.twcemg.service
             │ ├─libpod-payload-e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3
             │ │ ├─75556 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.twcemg -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─75558 /usr/bin/ceph-mgr -n mgr.compute-0.twcemg -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─75554 /usr/bin/conmon --api-version 1 -c e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3 -u e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3/userdata -p /run/containers/storage/overlay-containers/e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-mgr-compute-0-twcemg --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mgr.compute-0.twcemg.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3
             ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mon.compute-0.service
             │ ├─libpod-payload-fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04
             │ │ ├─75269 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─75271 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─runtime
             │   └─75267 /usr/bin/conmon --api-version 1 -c fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04 -u fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04/userdata -p /run/containers/storage/overlay-containers/fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04
             ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.0.service
             │ ├─libpod-payload-409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962
             │ │ ├─86142 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ └─86144 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ └─runtime
             │   └─86140 /usr/bin/conmon --api-version 1 -c 409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962 -u 409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962/userdata -p /run/containers/storage/overlay-containers/409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962
             ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.1.service
             │ ├─libpod-payload-8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0
             │ │ ├─87190 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ └─87192 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ └─runtime
             │   └─87188 /usr/bin/conmon --api-version 1 -c 8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0 -u 8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0/userdata -p /run/containers/storage/overlay-containers/8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0
             ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.2.service
             │ ├─libpod-payload-599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd
             │ │ ├─88234 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ └─88236 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ └─runtime
             │   └─88232 /usr/bin/conmon --api-version 1 -c 599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd -u 599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd/userdata -p /run/containers/storage/overlay-containers/599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd
             └─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@rgw.rgw.compute-0.ctqttb.service
               ├─libpod-payload-e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90
               │ ├─94941 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.ctqttb -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
               │ └─94979 /usr/bin/radosgw -n client.rgw.rgw.compute-0.ctqttb -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
               └─runtime
                 └─94939 /usr/bin/conmon --api-version 1 -c e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90 -u e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90/userdata -p /run/containers/storage/overlay-containers/e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-rgw-rgw-compute-0-ctqttb --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@rgw.rgw.compute-0.ctqttb.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90

Feb 02 12:24:50 compute-0 ceph-mon[75271]: from='client.19634 -' entity='client.admin' cmd=[{"prefix": "fs status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Feb 02 12:24:50 compute-0 ceph-mon[75271]: from='client.? 192.168.122.100:0/3703384699' entity='client.admin' cmd={"prefix": "mds stat", "format": "json-pretty"} : dispatch
Feb 02 12:24:50 compute-0 ceph-mon[75271]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "mon dump", "format": "json-pretty"} v 0)
Feb 02 12:24:50 compute-0 ceph-mon[75271]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/462666043' entity='client.admin' cmd={"prefix": "mon dump", "format": "json-pretty"} : dispatch
Feb 02 12:24:50 compute-0 ceph-mgr[75558]: log_channel(audit) log [DBG] : from='client.19640 -' entity='client.admin' cmd=[{"prefix": "osd blocked-by", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Feb 02 12:24:51 compute-0 ceph-mgr[75558]: log_channel(cluster) log [DBG] : pgmap v2103: 305 pgs: 305 active+clean; 271 MiB data, 633 MiB used, 59 GiB / 60 GiB avail
Feb 02 12:24:51 compute-0 ceph-mon[75271]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd blocklist ls", "format": "json-pretty"} v 0)
Feb 02 12:24:51 compute-0 ceph-mon[75271]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3242803391' entity='client.admin' cmd={"prefix": "osd blocklist ls", "format": "json-pretty"} : dispatch
Feb 02 12:24:51 compute-0 ceph-mon[75271]: from='client.? 192.168.122.100:0/462666043' entity='client.admin' cmd={"prefix": "mon dump", "format": "json-pretty"} : dispatch
Feb 02 12:24:51 compute-0 ceph-mon[75271]: from='client.? 192.168.122.100:0/3242803391' entity='client.admin' cmd={"prefix": "osd blocklist ls", "format": "json-pretty"} : dispatch

● system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice - Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged
     Loaded: loaded
     Active: active since Mon 2026-02-02 11:44:35 UTC; 40min ago
      Until: Mon 2026-02-02 11:44:35 UTC; 40min ago
         IO: 4.0K read, 0B written
      Tasks: 0
     Memory: 4.0K (peak: 58.9M)
        CPU: 933ms
     CGroup: /system.slice/system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice

Feb 02 11:44:35 compute-0 systemd[1]: Created slice Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged.

● system-getty.slice - Slice /system/getty
     Loaded: loaded
     Active: active since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 276.0K (peak: 752.0K)
        CPU: 7ms
     CGroup: /system.slice/system-getty.slice
             └─getty@tty1.service
               └─1014 /sbin/agetty -o "-p -- \\u" --noclear - linux

● system-modprobe.slice - Slice /system/modprobe
     Loaded: loaded
     Active: active since Mon 2026-02-02 10:52:36 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:36 UTC; 1h 32min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 52.0K (peak: 11.3M)
        CPU: 112ms
     CGroup: /system.slice/system-modprobe.slice

Feb 02 10:52:36 localhost systemd[1]: Created slice Slice /system/modprobe.

● system-serial\x2dgetty.slice - Slice /system/serial-getty
     Loaded: loaded
     Active: active since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 280.0K (peak: 524.0K)
        CPU: 8ms
     CGroup: /system.slice/system-serial\x2dgetty.slice
             └─serial-getty@ttyS0.service
               └─1015 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

● system-sshd\x2dkeygen.slice - Slice /system/sshd-keygen
     Loaded: loaded
     Active: active since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 0B (peak: 0B)
        CPU: 0
     CGroup: /system.slice/system-sshd\x2dkeygen.slice

● system.slice - System Slice
     Loaded: loaded
     Active: active since Mon 2026-02-02 10:52:35 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:35 UTC; 1h 32min ago
       Docs: man:systemd.special(7)
         IO: 1.9G read, 25.4G written
      Tasks: 1133
     Memory: 4.1G (peak: 5.1G)
        CPU: 9min 42.022s
     CGroup: /system.slice
             ├─NetworkManager.service
             │ └─49093 /usr/sbin/NetworkManager --no-daemon
             ├─auditd.service
             │ ├─704 /sbin/auditd
             │ └─706 /usr/sbin/sedispatch
             ├─chronyd.service
             │ └─58660 /usr/sbin/chronyd -F 2
             ├─crond.service
             │ ├─1013 /usr/sbin/crond -n
             │ └─7490 /usr/sbin/anacron -s
             ├─dbus-broker.service
             │ ├─768 /usr/bin/dbus-broker-launch --scope system --audit
             │ └─775 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             ├─edpm_nova_compute.service
             │ └─238883 /usr/bin/conmon --api-version 1 -c e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07 -u e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07/userdata -p /run/containers/storage/overlay-containers/e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg e8469079de5f6cf853327e20582fc7412a39dd166f8e8fc6edb7e70c21cf9b07
             ├─edpm_ovn_controller.service
             │ └─145056 /usr/bin/conmon --api-version 1 -c dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059 -u dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059/userdata -p /run/containers/storage/overlay-containers/dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059
             ├─edpm_ovn_metadata_agent.service
             │ └─155006 /usr/bin/conmon --api-version 1 -c f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9 -u f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9/userdata -p /run/containers/storage/overlay-containers/f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9
             ├─gssproxy.service
             │ └─876 /usr/sbin/gssproxy -D
             ├─irqbalance.service
             │ └─782 /usr/sbin/irqbalance
             ├─iscsid.service
             │ └─223533 /usr/sbin/iscsid -f
             ├─multipathd.service
             │ └─223815 /sbin/multipathd -d -s
             ├─ovs-vswitchd.service
             │ └─47398 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             ├─ovsdb-server.service
             │ └─47317 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             ├─polkit.service
             │ └─43582 /usr/lib/polkit-1/polkitd --no-debug
             ├─rpcbind.service
             │ └─702 /usr/bin/rpcbind -w -f
             ├─rsyslog.service
             │ └─1009 /usr/sbin/rsyslogd -n
             ├─sshd.service
             │ └─181254 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             ├─system-ceph\x2d4548a36b\x2d7cdc\x2d5e3e\x2da814\x2d4e1571be1fae.slice
             │ ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@crash.compute-0.service
             │ │ ├─libpod-payload-61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0
             │ │ │ ├─80410 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ └─80412 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ └─runtime
             │ │   └─80408 /usr/bin/conmon --api-version 1 -c 61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0 -u 61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0/userdata -p /run/containers/storage/overlay-containers/61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 61b0483497dcb2f7f58b3253d407131b9b241fdd95d0c46b378db53811c0e7e0
             │ ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mds.cephfs.compute-0.mldrue.service
             │ │ ├─libpod-payload-2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409
             │ │ │ ├─95496 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.mldrue -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─95505 /usr/bin/ceph-mds -n mds.cephfs.compute-0.mldrue -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─95494 /usr/bin/conmon --api-version 1 -c 2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409 -u 2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409/userdata -p /run/containers/storage/overlay-containers/2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-mds-cephfs-compute-0-mldrue --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mds.cephfs.compute-0.mldrue.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2fd942fa7e2a914f99986cfceb092b754d16f7807e8ec91f52763c7e36375409
             │ ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mgr.compute-0.twcemg.service
             │ │ ├─libpod-payload-e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3
             │ │ │ ├─75556 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.twcemg -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─75558 /usr/bin/ceph-mgr -n mgr.compute-0.twcemg -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─75554 /usr/bin/conmon --api-version 1 -c e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3 -u e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3/userdata -p /run/containers/storage/overlay-containers/e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-mgr-compute-0-twcemg --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mgr.compute-0.twcemg.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e108912e9f7d348f2198e623df71406e8b579a2b0383329619c90634f2e480e3
             │ ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mon.compute-0.service
             │ │ ├─libpod-payload-fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04
             │ │ │ ├─75269 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─75271 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─runtime
             │ │   └─75267 /usr/bin/conmon --api-version 1 -c fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04 -u fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04/userdata -p /run/containers/storage/overlay-containers/fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg fffb528e321276c0c3873a515991dd68a346504106615c6708abcd60682ada04
             │ ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.0.service
             │ │ ├─libpod-payload-409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962
             │ │ │ ├─86142 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ └─86144 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ └─runtime
             │ │   └─86140 /usr/bin/conmon --api-version 1 -c 409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962 -u 409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962/userdata -p /run/containers/storage/overlay-containers/409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 409c17664cc02736f3933d20ee5231e4ffe9782f6c64da4adfb864281bfbf962
             │ ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.1.service
             │ │ ├─libpod-payload-8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0
             │ │ │ ├─87190 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ └─87192 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ └─runtime
             │ │   └─87188 /usr/bin/conmon --api-version 1 -c 8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0 -u 8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0/userdata -p /run/containers/storage/overlay-containers/8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8937a933e50696556fe35cb340d4ec6a1d14744b5d0b97eff32c66fdd41a97e0
             │ ├─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.2.service
             │ │ ├─libpod-payload-599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd
             │ │ │ ├─88234 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ └─88236 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ └─runtime
             │ │   └─88232 /usr/bin/conmon --api-version 1 -c 599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd -u 599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd/userdata -p /run/containers/storage/overlay-containers/599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 599aa4410c1f164fe63289f8e57a829ab4dd98bf68fe6ed58d4ebf68bc2ecffd
             │ └─ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@rgw.rgw.compute-0.ctqttb.service
             │   ├─libpod-payload-e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90
             │   │ ├─94941 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.ctqttb -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
             │   │ └─94979 /usr/bin/radosgw -n client.rgw.rgw.compute-0.ctqttb -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
             │   └─runtime
             │     └─94939 /usr/bin/conmon --api-version 1 -c e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90 -u e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90/userdata -p /run/containers/storage/overlay-containers/e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90/userdata/pidfile -n ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae-rgw-rgw-compute-0-ctqttb --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90/userdata/oci-log --conmon-pidfile /run/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae@rgw.rgw.compute-0.ctqttb.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e99f14eeb73d2875924ac314fa66315a78e51bc47ad69dfc1b512fee8f93bf90
             ├─system-getty.slice
             │ └─getty@tty1.service
             │   └─1014 /sbin/agetty -o "-p -- \\u" --noclear - linux
             ├─system-serial\x2dgetty.slice
             │ └─serial-getty@ttyS0.service
             │   └─1015 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             ├─systemd-hostnamed.service
             │ └─291011 /usr/lib/systemd/systemd-hostnamed
             ├─systemd-journald.service
             │ └─680 /usr/lib/systemd/systemd-journald
             ├─systemd-logind.service
             │ └─786 /usr/lib/systemd/systemd-logind
             ├─systemd-machined.service
             │ └─206973 /usr/lib/systemd/systemd-machined
             ├─systemd-udevd.service
             │ └─udev
             │   └─732 /usr/lib/systemd/systemd-udevd
             ├─tuned.service
             │ └─105965 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             ├─virtlogd.service
             │ └─206344 /usr/sbin/virtlogd
             ├─virtnodedevd.service
             │ └─239207 /usr/sbin/virtnodedevd --timeout 120
             ├─virtqemud.service
             │ └─238654 /usr/sbin/virtqemud --timeout 120
             └─virtsecretd.service
               └─245219 /usr/sbin/virtsecretd --timeout 120

Feb 02 12:24:44 compute-0 nova_compute[238883]: 2026-02-02 12:24:44.520 238887 DEBUG oslo_concurrency.processutils [None req-2db00bf8-a3a7-4a5c-8821-4d324af3ae5a - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384[00m
Feb 02 12:24:45 compute-0 podman[293549]: 2026-02-02 12:24:45.04804548 +0000 UTC m=+0.055500851 container health_status f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_metadata_agent, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, container_name=ovn_metadata_agent, managed_by=edpm_ansible, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260127, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': 'f753062280449c359ff4c2dce751de4cb0e8717503110c3ea49626eae4ec2b5b-af67468a64521187a3d19b349040e64b8ebe04ca093912f980732f2b3e0883e2-af67468a64521187a3d19b349040e64b8ebe04ca093912f980732f2b3e0883e2-af67468a64521187a3d19b349040e64b8ebe04ca093912f980732f2b3e0883e2-0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/openstack/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', 
'/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']})
Feb 02 12:24:45 compute-0 nova_compute[238883]: 2026-02-02 12:24:45.064 238887 DEBUG oslo_concurrency.processutils [None req-2db00bf8-a3a7-4a5c-8821-4d324af3ae5a - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.544s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422[00m
Feb 02 12:24:45 compute-0 nova_compute[238883]: 2026-02-02 12:24:45.072 238887 DEBUG nova.compute.provider_tree [None req-2db00bf8-a3a7-4a5c-8821-4d324af3ae5a - - - - - -] Inventory has not changed in ProviderTree for provider: 30401227-b88f-415d-9c2d-3119bd1baf61 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180[00m
Feb 02 12:24:45 compute-0 podman[293547]: 2026-02-02 12:24:45.080522176 +0000 UTC m=+0.085193833 container health_status dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, container_name=ovn_controller, org.label-schema.license=GPLv2, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': 'f753062280449c359ff4c2dce751de4cb0e8717503110c3ea49626eae4ec2b5b-af67468a64521187a3d19b349040e64b8ebe04ca093912f980732f2b3e0883e2-af67468a64521187a3d19b349040e64b8ebe04ca093912f980732f2b3e0883e2-af67468a64521187a3d19b349040e64b8ebe04ca093912f980732f2b3e0883e2'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.vendor=CentOS, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260127, org.label-schema.schema-version=1.0, tcib_managed=true, config_id=ovn_controller, 
io.buildah.version=1.41.3, managed_by=edpm_ansible)
Feb 02 12:24:45 compute-0 nova_compute[238883]: 2026-02-02 12:24:45.094 238887 DEBUG nova.scheduler.client.report [None req-2db00bf8-a3a7-4a5c-8821-4d324af3ae5a - - - - - -] Inventory has not changed for provider 30401227-b88f-415d-9c2d-3119bd1baf61 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7679, 'reserved': 512, 'min_unit': 1, 'max_unit': 7679, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 59, 'reserved': 1, 'min_unit': 1, 'max_unit': 59, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940[00m
Feb 02 12:24:45 compute-0 nova_compute[238883]: 2026-02-02 12:24:45.096 238887 DEBUG nova.compute.resource_tracker [None req-2db00bf8-a3a7-4a5c-8821-4d324af3ae5a - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995[00m
Feb 02 12:24:45 compute-0 nova_compute[238883]: 2026-02-02 12:24:45.097 238887 DEBUG oslo_concurrency.lockutils [None req-2db00bf8-a3a7-4a5c-8821-4d324af3ae5a - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.686s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Feb 02 12:24:48 compute-0 nova_compute[238883]: 2026-02-02 12:24:48.036 238887 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 12:24:48 compute-0 nova_compute[238883]: 2026-02-02 12:24:48.388 238887 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m

● user-1000.slice - User Slice of UID 1000
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Mon 2026-02-02 10:52:58 UTC; 1h 31min ago
      Until: Mon 2026-02-02 10:52:58 UTC; 1h 31min ago
       Docs: man:user@.service(5)
         IO: 828.8M read, 8.7G written
      Tasks: 37 (limit: 20031)
     Memory: 1.5G (peak: 4.2G)
        CPU: 22min 9.274s
     CGroup: /user.slice/user-1000.slice
             ├─session-1.scope
             │ └─4519 /usr/bin/python3
             ├─session-54.scope
             │ ├─287512 "sshd-session: zuul [priv]"
             │ ├─287564 "sshd-session: zuul@notty"
             │ ├─287565 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ ├─287589 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ ├─293711 timeout 15s turbostat --debug sleep 10
             │ ├─294058 timeout 300s systemctl status --all
             │ ├─294059 systemctl status --all
             │ ├─294087 timeout 300s ceph osd df tree --format json-pretty
             │ ├─294088 /usr/bin/python3 -s /usr/bin/ceph osd df tree --format json-pretty
             │ ├─294108 timeout 300s semanage boolean -l
             │ └─294109 /usr/bin/python3 -EsI /usr/sbin/semanage boolean -l
             └─user@1000.service
               ├─app.slice
               │ └─dbus-broker.service
               │   ├─13969 /usr/bin/dbus-broker-launch --scope user
               │   └─13970 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               ├─init.scope
               │ ├─4309 /usr/lib/systemd/systemd --user
               │ └─4311 "(sd-pam)"
               └─user.slice
                 └─podman-pause-2465a80e.scope
                   └─13952 catatonit -P

Feb 02 12:16:02 compute-0 sudo[281843]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/rm -rf /var/tmp/sos-osp
Feb 02 12:16:02 compute-0 sudo[281843]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Feb 02 12:16:02 compute-0 sudo[281843]: pam_unix(sudo:session): session closed for user root
Feb 02 12:16:02 compute-0 sshd-session[281842]: Received disconnect from 192.168.122.10 port 35328:11: disconnected by user
Feb 02 12:16:02 compute-0 sshd-session[281842]: Disconnected from user zuul 192.168.122.10 port 35328
Feb 02 12:24:06 compute-0 sudo[287565]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Feb 02 12:24:06 compute-0 sudo[287565]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Feb 02 12:24:12 compute-0 ovs-vsctl[288324]: ovs|00001|db_ctl_base|ERR|no key "dpdk-init" in Open_vSwitch record "." column other_config
Feb 02 12:24:43 compute-0 ovs-appctl[292799]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Feb 02 12:24:43 compute-0 ovs-appctl[292804]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

● user-42477.slice - User Slice of UID 42477
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Mon 2026-02-02 11:31:15 UTC; 53min ago
      Until: Mon 2026-02-02 11:31:15 UTC; 53min ago
       Docs: man:user@.service(5)
         IO: 1.8M read, 177.1M written
      Tasks: 26 (limit: 20031)
     Memory: 28.7M (peak: 77.2M)
        CPU: 3min 10.603s
     CGroup: /user.slice/user-42477.slice
             ├─session-20.scope
             │ ├─76636 "sshd-session: ceph-admin [priv]"
             │ └─76657 "sshd-session: ceph-admin"
             ├─session-22.scope
             │ ├─76656 "sshd-session: ceph-admin [priv]"
             │ └─76660 "sshd-session: ceph-admin@notty"
             ├─session-23.scope
             │ ├─76686 "sshd-session: ceph-admin [priv]"
             │ └─76689 "sshd-session: ceph-admin@notty"
             ├─session-24.scope
             │ ├─76715 "sshd-session: ceph-admin [priv]"
             │ └─76718 "sshd-session: ceph-admin@notty"
             ├─session-25.scope
             │ ├─76744 "sshd-session: ceph-admin [priv]"
             │ └─76747 "sshd-session: ceph-admin@notty"
             ├─session-26.scope
             │ ├─76773 "sshd-session: ceph-admin [priv]"
             │ └─76776 "sshd-session: ceph-admin@notty"
             ├─session-27.scope
             │ ├─76802 "sshd-session: ceph-admin [priv]"
             │ └─76805 "sshd-session: ceph-admin@notty"
             ├─session-28.scope
             │ ├─76831 "sshd-session: ceph-admin [priv]"
             │ └─76834 "sshd-session: ceph-admin@notty"
             ├─session-29.scope
             │ ├─76860 "sshd-session: ceph-admin [priv]"
             │ └─76863 "sshd-session: ceph-admin@notty"
             ├─session-30.scope
             │ ├─76889 "sshd-session: ceph-admin [priv]"
             │ └─76892 "sshd-session: ceph-admin@notty"
             ├─session-31.scope
             │ ├─76916 "sshd-session: ceph-admin [priv]"
             │ └─76919 "sshd-session: ceph-admin@notty"
             ├─session-32.scope
             │ ├─76945 "sshd-session: ceph-admin [priv]"
             │ └─76948 "sshd-session: ceph-admin@notty"
             └─user@42477.service
               └─init.scope
                 ├─76640 /usr/lib/systemd/systemd --user
                 └─76642 "(sd-pam)"

Feb 02 12:24:10 compute-0 podman[288152]: 2026-02-02 12:24:10.01373312 +0000 UTC m=+0.023877447 image pull 524f3da276461682bec27427fb8a63b5139c40ad4185939aede197474a6817b3 quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86
Feb 02 12:24:10 compute-0 podman[288152]: 2026-02-02 12:24:10.130612926 +0000 UTC m=+0.140757253 container init 9f68c28eb02450c3fcbb3c231eb7d9458d1ce4bca9b4288ef57689472ceb0ddf (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=modest_banach, org.label-schema.schema-version=1.0, CEPH_REF=tentacle, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.build-date=20251030)
Feb 02 12:24:10 compute-0 podman[288152]: 2026-02-02 12:24:10.138662451 +0000 UTC m=+0.148806748 container start 9f68c28eb02450c3fcbb3c231eb7d9458d1ce4bca9b4288ef57689472ceb0ddf (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=modest_banach, org.label-schema.build-date=20251030, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=tentacle, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, ceph=True, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3)
Feb 02 12:24:10 compute-0 podman[288152]: 2026-02-02 12:24:10.143490899 +0000 UTC m=+0.153635216 container attach 9f68c28eb02450c3fcbb3c231eb7d9458d1ce4bca9b4288ef57689472ceb0ddf (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=modest_banach, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=tentacle, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20251030, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489)
Feb 02 12:24:11 compute-0 podman[288152]: 2026-02-02 12:24:11.036501625 +0000 UTC m=+1.046645942 container died 9f68c28eb02450c3fcbb3c231eb7d9458d1ce4bca9b4288ef57689472ceb0ddf (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=modest_banach, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.41.3, org.label-schema.build-date=20251030, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_REF=tentacle, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0)
Feb 02 12:24:11 compute-0 podman[288152]: 2026-02-02 12:24:11.089633501 +0000 UTC m=+1.099777798 container remove 9f68c28eb02450c3fcbb3c231eb7d9458d1ce4bca9b4288ef57689472ceb0ddf (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=modest_banach, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=tentacle, org.label-schema.build-date=20251030, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489)
Feb 02 12:24:11 compute-0 sudo[288052]: pam_unix(sudo:session): session closed for user root
Feb 02 12:24:11 compute-0 sudo[288270]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Feb 02 12:24:11 compute-0 sudo[288270]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 12:24:11 compute-0 sudo[288270]: pam_unix(sudo:session): session closed for user root

● user.slice - User and Session Slice
     Loaded: loaded (/usr/lib/systemd/system/user.slice; static)
     Active: active since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:systemd.special(7)
         IO: 830.7M read, 8.9G written
      Tasks: 63
     Memory: 1.5G (peak: 4.2G)
        CPU: 25min 20.401s
     CGroup: /user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4519 /usr/bin/python3
             │ ├─session-54.scope
             │ │ ├─287512 "sshd-session: zuul [priv]"
             │ │ ├─287564 "sshd-session: zuul@notty"
             │ │ ├─287565 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─287589 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─293711 timeout 15s turbostat --debug sleep 10
             │ │ ├─294058 timeout 300s systemctl status --all
             │ │ ├─294059 systemctl status --all
             │ │ ├─294087 timeout 300s ceph osd df tree --format json-pretty
             │ │ ├─294088 /usr/bin/python3 -s /usr/bin/ceph osd df tree --format json-pretty
             │ │ ├─294108 timeout 300s semanage boolean -l
             │ │ └─294109 /usr/bin/python3 -EsI /usr/sbin/semanage boolean -l
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─13969 /usr/bin/dbus-broker-launch --scope user
             │   │   └─13970 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4309 /usr/lib/systemd/systemd --user
             │   │ └─4311 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-2465a80e.scope
             │       └─13952 catatonit -P
             └─user-42477.slice
               ├─session-20.scope
               │ ├─76636 "sshd-session: ceph-admin [priv]"
               │ └─76657 "sshd-session: ceph-admin"
               ├─session-22.scope
               │ ├─76656 "sshd-session: ceph-admin [priv]"
               │ └─76660 "sshd-session: ceph-admin@notty"
               ├─session-23.scope
               │ ├─76686 "sshd-session: ceph-admin [priv]"
               │ └─76689 "sshd-session: ceph-admin@notty"
               ├─session-24.scope
               │ ├─76715 "sshd-session: ceph-admin [priv]"
               │ └─76718 "sshd-session: ceph-admin@notty"
               ├─session-25.scope
               │ ├─76744 "sshd-session: ceph-admin [priv]"
               │ └─76747 "sshd-session: ceph-admin@notty"
               ├─session-26.scope
               │ ├─76773 "sshd-session: ceph-admin [priv]"
               │ └─76776 "sshd-session: ceph-admin@notty"
               ├─session-27.scope
               │ ├─76802 "sshd-session: ceph-admin [priv]"
               │ └─76805 "sshd-session: ceph-admin@notty"
               ├─session-28.scope
               │ ├─76831 "sshd-session: ceph-admin [priv]"
               │ └─76834 "sshd-session: ceph-admin@notty"
               ├─session-29.scope
               │ ├─76860 "sshd-session: ceph-admin [priv]"
               │ └─76863 "sshd-session: ceph-admin@notty"
               ├─session-30.scope
               │ ├─76889 "sshd-session: ceph-admin [priv]"
               │ └─76892 "sshd-session: ceph-admin@notty"
               ├─session-31.scope
               │ ├─76916 "sshd-session: ceph-admin [priv]"
               │ └─76919 "sshd-session: ceph-admin@notty"
               ├─session-32.scope
               │ ├─76945 "sshd-session: ceph-admin [priv]"
               │ └─76948 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─76640 /usr/lib/systemd/systemd --user
                   └─76642 "(sd-pam)"

● dbus.socket - D-Bus System Message Bus Socket
     Loaded: loaded (/usr/lib/systemd/system/dbus.socket; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
   Triggers: ● dbus-broker.service
     Listen: /run/dbus/system_bus_socket (Stream)
     CGroup: /system.slice/dbus.socket

Feb 02 10:52:39 localhost systemd[1]: Listening on D-Bus System Message Bus Socket.

● dm-event.socket - Device-mapper event daemon FIFOs
     Loaded: loaded (/usr/lib/systemd/system/dm-event.socket; enabled; preset: enabled)
     Active: active (listening) since Mon 2026-02-02 11:24:35 UTC; 1h 0min ago
      Until: Mon 2026-02-02 11:24:35 UTC; 1h 0min ago
   Triggers: ● dm-event.service
       Docs: man:dmeventd(8)
     Listen: /run/dmeventd-server (FIFO)
             /run/dmeventd-client (FIFO)
     CGroup: /system.slice/dm-event.socket

Feb 02 11:24:35 compute-0 systemd[1]: Listening on Device-mapper event daemon FIFOs.

● iscsid.socket - Open-iSCSI iscsid Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsid.socket; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 11:45:29 UTC; 39min ago
      Until: Mon 2026-02-02 11:45:29 UTC; 39min ago
   Triggers: ● iscsid.service
       Docs: man:iscsid(8)
             man:iscsiadm(8)
     Listen: @ISCSIADM_ABSTRACT_NAMESPACE (Stream)
     CGroup: /system.slice/iscsid.socket

Feb 02 11:45:29 compute-0 systemd[1]: Listening on Open-iSCSI iscsid Socket.

○ iscsiuio.socket - Open-iSCSI iscsiuio Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● iscsiuio.service
       Docs: man:iscsiuio(8)
     Listen: @ISCSID_UIP_ABSTRACT_NAMESPACE (Stream)

○ libvirtd-admin.socket - libvirt legacy monolithic daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-admin.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)

○ libvirtd-ro.socket - libvirt legacy monolithic daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-ro.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)

○ libvirtd-tls.socket
     Loaded: masked (Reason: Unit libvirtd-tls.socket is masked.)
     Active: inactive (dead)

○ libvirtd.socket - libvirt legacy monolithic daemon socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock (Stream)

● lvm2-lvmpolld.socket - LVM2 poll daemon socket
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.socket; enabled; preset: enabled)
     Active: active (listening) since Mon 2026-02-02 11:24:35 UTC; 1h 0min ago
      Until: Mon 2026-02-02 11:24:35 UTC; 1h 0min ago
   Triggers: ● lvm2-lvmpolld.service
       Docs: man:lvmpolld(8)
     Listen: /run/lvm/lvmpolld.socket (Stream)
     CGroup: /system.slice/lvm2-lvmpolld.socket

Feb 02 11:24:35 compute-0 systemd[1]: Listening on LVM2 poll daemon socket.

● multipathd.socket - multipathd control socket
     Loaded: loaded (/usr/lib/systemd/system/multipathd.socket; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 11:45:50 UTC; 39min ago
      Until: Mon 2026-02-02 11:45:50 UTC; 39min ago
   Triggers: ● multipathd.service
     Listen: @/org/kernel/linux/storage/multipathd (Stream)
             /run/multipathd.socket (Stream)
     CGroup: /system.slice/multipathd.socket

Feb 02 11:45:50 compute-0 systemd[1]: Listening on multipathd control socket.

● rpcbind.socket - RPCbind Server Activation Socket
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.socket; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
   Triggers: ● rpcbind.service
     Listen: /run/rpcbind.sock (Stream)
             0.0.0.0:111 (Stream)
             0.0.0.0:111 (Datagram)
             [::]:111 (Stream)
             [::]:111 (Datagram)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 16.0K (peak: 288.0K)
        CPU: 3ms
     CGroup: /system.slice/rpcbind.socket

● sssd-kcm.socket - SSSD Kerberos Cache Manager responder socket
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.socket; enabled; preset: enabled)
     Active: active (listening) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
   Triggers: ● sssd-kcm.service
       Docs: man:sssd-kcm(8)
     Listen: /run/.heim_org.h5l.kcm-socket (Stream)
     CGroup: /system.slice/sssd-kcm.socket

Feb 02 10:52:39 localhost systemd[1]: Listening on SSSD Kerberos Cache Manager responder socket.

○ syslog.socket - Syslog Socket
     Loaded: loaded (/usr/lib/systemd/system/syslog.socket; static)
     Active: inactive (dead)
   Triggers: ● syslog.service
       Docs: man:systemd.special(7)
             https://www.freedesktop.org/wiki/Software/systemd/syslog
     Listen: /run/systemd/journal/syslog (Datagram)

● systemd-coredump.socket - Process Core Dump Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-coredump.socket; static)
     Active: active (listening) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:systemd-coredump(8)
     Listen: /run/systemd/coredump (SequentialPacket)
   Accepted: 0; Connected: 0;
     CGroup: /system.slice/systemd-coredump.socket

● systemd-initctl.socket - initctl Compatibility Named Pipe
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.socket; static)
     Active: active (listening) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
   Triggers: ● systemd-initctl.service
       Docs: man:systemd-initctl.socket(8)
     Listen: /run/initctl (FIFO)
     CGroup: /system.slice/systemd-initctl.socket

● systemd-journald-dev-log.socket - Journal Socket (/dev/log)
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald-dev-log.socket; static)
     Active: active (running) since Mon 2026-02-02 10:52:35 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:35 UTC; 1h 32min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/dev-log (Datagram)
     CGroup: /system.slice/systemd-journald-dev-log.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-journald.socket - Journal Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.socket; static)
     Active: active (running) since Mon 2026-02-02 10:52:35 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:35 UTC; 1h 32min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/socket (Datagram)
             /run/systemd/journal/stdout (Stream)
     CGroup: /system.slice/systemd-journald.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-rfkill.socket - Load/Save RF Kill Switch Status /dev/rfkill Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.socket; static)
     Active: active (listening) since Mon 2026-02-02 10:52:41 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:41 UTC; 1h 32min ago
   Triggers: ● systemd-rfkill.service
       Docs: man:systemd-rfkill.socket(8)
     Listen: /dev/rfkill (Special)
     CGroup: /system.slice/systemd-rfkill.socket

Feb 02 10:52:41 np0005604943.novalocal systemd[1]: Listening on Load/Save RF Kill Switch Status /dev/rfkill Watch.

● systemd-udevd-control.socket - udev Control Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-control.socket; static)
     Active: active (running) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-control.socket(8)
             man:udev(7)
     Listen: /run/udev/control (SequentialPacket)
     CGroup: /system.slice/systemd-udevd-control.socket

● systemd-udevd-kernel.socket - udev Kernel Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-kernel.socket; static)
     Active: active (running) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-kernel.socket(8)
             man:udev(7)
     Listen: kobject-uevent 1 (Netlink)
     CGroup: /system.slice/systemd-udevd-kernel.socket

○ virtinterfaced-admin.socket - libvirt interface daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-admin-sock (Stream)

○ virtinterfaced-ro.socket - libvirt interface daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock-ro (Stream)

○ virtinterfaced.socket - libvirt interface daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock (Stream)

○ virtlockd-admin.socket - libvirt locking daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-admin-sock (Stream)

● virtlockd.socket - libvirt locking daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.socket; enabled; preset: enabled)
     Active: active (listening) since Mon 2026-02-02 11:44:36 UTC; 40min ago
      Until: Mon 2026-02-02 11:44:36 UTC; 40min ago
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-sock (Stream)
     CGroup: /system.slice/virtlockd.socket

Feb 02 11:44:36 compute-0 systemd[1]: Listening on libvirt locking daemon socket.

● virtlogd-admin.socket - libvirt logging daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd-admin.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 11:44:33 UTC; 40min ago
      Until: Mon 2026-02-02 11:44:33 UTC; 40min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtlogd-admin.socket

Feb 02 11:44:33 compute-0 systemd[1]: Starting libvirt logging daemon admin socket...
Feb 02 11:44:33 compute-0 systemd[1]: Listening on libvirt logging daemon admin socket.

● virtlogd.socket - libvirt logging daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 11:44:33 UTC; 40min ago
      Until: Mon 2026-02-02 11:44:33 UTC; 40min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtlogd.socket

Feb 02 11:44:33 compute-0 systemd[1]: Starting libvirt logging daemon socket...
Feb 02 11:44:33 compute-0 systemd[1]: Listening on libvirt logging daemon socket.

○ virtnetworkd-admin.socket - libvirt network daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-admin-sock (Stream)

○ virtnetworkd-ro.socket - libvirt network daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock-ro (Stream)

○ virtnetworkd.socket - libvirt network daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock (Stream)

● virtnodedevd-admin.socket - libvirt nodedev daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-admin.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 11:44:34 UTC; 40min ago
      Until: Mon 2026-02-02 11:44:34 UTC; 40min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtnodedevd-admin.socket

Feb 02 11:44:34 compute-0 systemd[1]: Starting libvirt nodedev daemon admin socket...
Feb 02 11:44:34 compute-0 systemd[1]: Listening on libvirt nodedev daemon admin socket.

● virtnodedevd-ro.socket - libvirt nodedev daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-ro.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 11:44:34 UTC; 40min ago
      Until: Mon 2026-02-02 11:44:34 UTC; 40min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 492.0K)
        CPU: 2ms
     CGroup: /system.slice/virtnodedevd-ro.socket

Feb 02 11:44:34 compute-0 systemd[1]: Starting libvirt nodedev daemon read-only socket...
Feb 02 11:44:34 compute-0 systemd[1]: Listening on libvirt nodedev daemon read-only socket.

● virtnodedevd.socket - libvirt nodedev daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 11:44:34 UTC; 40min ago
      Until: Mon 2026-02-02 11:44:34 UTC; 40min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtnodedevd.socket

Feb 02 11:44:34 compute-0 systemd[1]: Starting libvirt nodedev daemon socket...
Feb 02 11:44:34 compute-0 systemd[1]: Listening on libvirt nodedev daemon socket.

○ virtnwfilterd-admin.socket - libvirt nwfilter daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-admin-sock (Stream)

○ virtnwfilterd-ro.socket - libvirt nwfilter daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock-ro (Stream)

○ virtnwfilterd.socket - libvirt nwfilter daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock (Stream)

● virtproxyd-admin.socket - libvirt proxy daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-admin.socket.d
             └─override.conf
     Active: active (listening) since Mon 2026-02-02 11:44:35 UTC; 40min ago
      Until: Mon 2026-02-02 11:44:35 UTC; 40min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 1ms
     CGroup: /system.slice/virtproxyd-admin.socket

Feb 02 11:44:35 compute-0 systemd[1]: Starting libvirt proxy daemon admin socket...
Feb 02 11:44:35 compute-0 systemd[1]: Listening on libvirt proxy daemon admin socket.

● virtproxyd-ro.socket - libvirt proxy daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-ro.socket.d
             └─override.conf
     Active: active (listening) since Mon 2026-02-02 11:44:35 UTC; 40min ago
      Until: Mon 2026-02-02 11:44:35 UTC; 40min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtproxyd-ro.socket

Feb 02 11:44:35 compute-0 systemd[1]: Starting libvirt proxy daemon read-only socket...
Feb 02 11:44:35 compute-0 systemd[1]: Listening on libvirt proxy daemon read-only socket.

● virtproxyd-tls.socket - libvirt proxy daemon TLS IP socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-tls.socket; enabled; preset: disabled)
     Active: active (listening) since Mon 2026-02-02 11:43:40 UTC; 41min ago
      Until: Mon 2026-02-02 11:43:40 UTC; 41min ago
   Triggers: ● virtproxyd.service
     Listen: [::]:16514 (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 8.0K (peak: 256.0K)
        CPU: 1ms
     CGroup: /system.slice/virtproxyd-tls.socket

Feb 02 11:43:40 compute-0 systemd[1]: Listening on libvirt proxy daemon TLS IP socket.

● virtproxyd.socket - libvirt proxy daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd.socket.d
             └─override.conf
     Active: active (listening) since Mon 2026-02-02 11:43:40 UTC; 41min ago
      Until: Mon 2026-02-02 11:43:40 UTC; 41min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock (Stream)
     CGroup: /system.slice/virtproxyd.socket

Feb 02 11:43:40 compute-0 systemd[1]: Listening on libvirt proxy daemon socket.

● virtqemud-admin.socket - libvirt QEMU daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-admin.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 11:44:36 UTC; 40min ago
      Until: Mon 2026-02-02 11:44:36 UTC; 40min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 520.0K)
        CPU: 2ms
     CGroup: /system.slice/virtqemud-admin.socket

Feb 02 11:44:36 compute-0 systemd[1]: Starting libvirt QEMU daemon admin socket...
Feb 02 11:44:36 compute-0 systemd[1]: Listening on libvirt QEMU daemon admin socket.

● virtqemud-ro.socket - libvirt QEMU daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-ro.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 11:44:36 UTC; 40min ago
      Until: Mon 2026-02-02 11:44:36 UTC; 40min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 596.0K)
        CPU: 2ms
     CGroup: /system.slice/virtqemud-ro.socket

Feb 02 11:44:36 compute-0 systemd[1]: Starting libvirt QEMU daemon read-only socket...
Feb 02 11:44:36 compute-0 systemd[1]: Listening on libvirt QEMU daemon read-only socket.

● virtqemud.socket - libvirt QEMU daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 11:44:36 UTC; 40min ago
      Until: Mon 2026-02-02 11:44:36 UTC; 40min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtqemud.socket

Feb 02 11:44:36 compute-0 systemd[1]: Starting libvirt QEMU daemon socket...
Feb 02 11:44:36 compute-0 systemd[1]: Listening on libvirt QEMU daemon socket.

● virtsecretd-admin.socket - libvirt secret daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-admin.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 11:44:37 UTC; 40min ago
      Until: Mon 2026-02-02 11:44:37 UTC; 40min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 576.0K)
        CPU: 2ms
     CGroup: /system.slice/virtsecretd-admin.socket

Feb 02 11:44:37 compute-0 systemd[1]: Starting libvirt secret daemon admin socket...
Feb 02 11:44:37 compute-0 systemd[1]: Listening on libvirt secret daemon admin socket.

● virtsecretd-ro.socket - libvirt secret daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-ro.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 11:44:37 UTC; 40min ago
      Until: Mon 2026-02-02 11:44:37 UTC; 40min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 1ms
     CGroup: /system.slice/virtsecretd-ro.socket

Feb 02 11:44:37 compute-0 systemd[1]: Starting libvirt secret daemon read-only socket...
Feb 02 11:44:37 compute-0 systemd[1]: Listening on libvirt secret daemon read-only socket.

● virtsecretd.socket - libvirt secret daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 11:44:37 UTC; 40min ago
      Until: Mon 2026-02-02 11:44:37 UTC; 40min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtsecretd.socket

Feb 02 11:44:37 compute-0 systemd[1]: Starting libvirt secret daemon socket...
Feb 02 11:44:37 compute-0 systemd[1]: Listening on libvirt secret daemon socket.

○ virtstoraged-admin.socket - libvirt storage daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-admin-sock (Stream)

○ virtstoraged-ro.socket - libvirt storage daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock-ro (Stream)

○ virtstoraged.socket - libvirt storage daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock (Stream)

● swap.swap - /swap
     Loaded: loaded (/etc/fstab; generated)
     Active: active since Mon 2026-02-02 11:26:09 UTC; 58min ago
      Until: Mon 2026-02-02 11:26:09 UTC; 58min ago
       What: /swap
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● basic.target - Basic System
     Loaded: loaded (/usr/lib/systemd/system/basic.target; static)
     Active: active since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
       Docs: man:systemd.special(7)

Feb 02 10:52:39 localhost systemd[1]: Reached target Basic System.

○ blockdev@dev-disk-by\x2duuid-822f14ea\x2d6e7e\x2d41df\x2db0d8\x2dfbe282d9ded8.target - Block Device Preparation for /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ blockdev@dev-vda1.target - Block Device Preparation for /dev/vda1
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae.target - Ceph cluster 4548a36b-7cdc-5e3e-a814-4e1571be1fae
     Loaded: loaded (/etc/systemd/system/ceph-4548a36b-7cdc-5e3e-a814-4e1571be1fae.target; enabled; preset: disabled)
     Active: active since Mon 2026-02-02 11:30:48 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:48 UTC; 54min ago

Feb 02 11:30:48 compute-0 systemd[1]: Reached target Ceph cluster 4548a36b-7cdc-5e3e-a814-4e1571be1fae.

● ceph.target - All Ceph clusters and services
     Loaded: loaded (/etc/systemd/system/ceph.target; enabled; preset: disabled)
     Active: active since Mon 2026-02-02 11:30:48 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:48 UTC; 54min ago

Feb 02 11:30:48 compute-0 systemd[1]: Reached target All Ceph clusters and services.

● cloud-config.target - Cloud-config availability
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.target; static)
     Active: active since Mon 2026-02-02 10:52:44 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:44 UTC; 1h 32min ago

Feb 02 10:52:44 np0005604943.novalocal systemd[1]: Reached target Cloud-config availability.

● cloud-init.target - Cloud-init target
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.target; enabled-runtime; preset: disabled)
     Active: active since Mon 2026-02-02 10:52:44 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:44 UTC; 1h 32min ago

Feb 02 10:52:44 np0005604943.novalocal systemd[1]: Reached target Cloud-init target.

○ cryptsetup-pre.target - Local Encrypted Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● cryptsetup.target - Local Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup.target; static)
     Active: active since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:systemd.special(7)

● edpm_libvirt.target
     Loaded: loaded (/etc/systemd/system/edpm_libvirt.target; static)
     Active: active since Mon 2026-02-02 11:45:03 UTC; 39min ago
      Until: Mon 2026-02-02 11:45:03 UTC; 39min ago

Feb 02 11:45:03 compute-0 systemd[1]: Reached target edpm_libvirt.target.

○ emergency.target - Emergency Mode
     Loaded: loaded (/usr/lib/systemd/system/emergency.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ first-boot-complete.target - First Boot Complete
     Loaded: loaded (/usr/lib/systemd/system/first-boot-complete.target; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:systemd.special(7)

Feb 02 10:52:38 localhost systemd[1]: First Boot Complete was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ getty-pre.target - Preparation for Logins
     Loaded: loaded (/usr/lib/systemd/system/getty-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

● getty.target - Login Prompts
     Loaded: loaded (/usr/lib/systemd/system/getty.target; static)
     Active: active since Mon 2026-02-02 10:52:44 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:44 UTC; 1h 32min ago
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

Feb 02 10:52:44 np0005604943.novalocal systemd[1]: Reached target Login Prompts.

○ graphical.target - Graphical Interface
     Loaded: loaded (/usr/lib/systemd/system/graphical.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ initrd-fs.target - Initrd File Systems
     Loaded: loaded (/usr/lib/systemd/system/initrd-fs.target; static)
     Active: inactive (dead) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:systemd.special(7)

Feb 02 10:52:37 localhost systemd[1]: Reached target Initrd File Systems.

○ initrd-root-device.target - Initrd Root Device
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-device.target; static)
     Active: inactive (dead) since Mon 2026-02-02 10:52:37 UTC; 1h 32min ago
       Docs: man:systemd.special(7)

Feb 02 10:52:36 localhost systemd[1]: Reached target Initrd Root Device.
Feb 02 10:52:37 localhost systemd[1]: Stopped target Initrd Root Device.

○ initrd-root-fs.target - Initrd Root File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-fs.target; static)
     Active: inactive (dead) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:systemd.special(7)

Feb 02 10:52:37 localhost systemd[1]: Reached target Initrd Root File System.

○ initrd-switch-root.target - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.target; static)
     Active: inactive (dead) since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago

Feb 02 10:52:37 localhost systemd[1]: Reached target Switch Root.

○ initrd-usr-fs.target - Initrd /usr File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-usr-fs.target; static)
     Active: inactive (dead) since Mon 2026-02-02 10:52:37 UTC; 1h 32min ago
       Docs: man:systemd.special(7)

Feb 02 10:52:37 localhost systemd[1]: Stopped target Initrd /usr File System.
Notice: journal has been rotated since unit was started, output may be incomplete.

○ initrd.target - Initrd Default Target
     Loaded: loaded (/usr/lib/systemd/system/initrd.target; static)
     Active: inactive (dead) since Mon 2026-02-02 10:52:37 UTC; 1h 32min ago
       Docs: man:systemd.special(7)

Feb 02 10:52:37 localhost systemd[1]: Reached target Initrd Default Target.
Feb 02 10:52:37 localhost systemd[1]: Stopped target Initrd Default Target.

● integritysetup.target - Local Integrity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/integritysetup.target; static)
     Active: active since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:systemd.special(7)

● local-fs-pre.target - Preparation for Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs-pre.target; static)
     Active: active since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
       Docs: man:systemd.special(7)

Feb 02 10:52:39 localhost systemd[1]: Reached target Preparation for Local File Systems.

● local-fs.target - Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs.target; static)
     Active: active since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
       Docs: man:systemd.special(7)

Feb 02 10:52:39 localhost systemd[1]: Reached target Local File Systems.

● multi-user.target - Multi-User System
     Loaded: loaded (/usr/lib/systemd/system/multi-user.target; indirect; preset: disabled)
     Active: active since Mon 2026-02-02 10:52:44 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:44 UTC; 1h 32min ago
       Docs: man:systemd.special(7)

Feb 02 10:52:44 np0005604943.novalocal systemd[1]: Reached target Multi-User System.

● network-online.target - Network is Online
     Loaded: loaded (/usr/lib/systemd/system/network-online.target; static)
     Active: active since Mon 2026-02-02 10:52:44 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:44 UTC; 1h 32min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Feb 02 10:52:44 np0005604943.novalocal systemd[1]: Reached target Network is Online.

● network-pre.target - Preparation for Network
     Loaded: loaded (/usr/lib/systemd/system/network-pre.target; static)
     Active: active since Mon 2026-02-02 10:52:41 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:41 UTC; 1h 32min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Feb 02 10:52:41 np0005604943.novalocal systemd[1]: Reached target Preparation for Network.

● network.target - Network
     Loaded: loaded (/usr/lib/systemd/system/network.target; static)
     Active: active since Mon 2026-02-02 10:52:41 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:41 UTC; 1h 32min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Feb 02 10:52:41 np0005604943.novalocal systemd[1]: Reached target Network.

● nfs-client.target - NFS client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-client.target; enabled; preset: disabled)
     Active: active since Mon 2026-02-02 10:52:41 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:41 UTC; 1h 32min ago

Feb 02 10:52:41 np0005604943.novalocal systemd[1]: Reached target NFS client services.

○ nss-lookup.target - Host and Network Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-lookup.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● nss-user-lookup.target - User and Group Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-user-lookup.target; static)
     Active: active since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
       Docs: man:systemd.special(7)

Feb 02 10:52:39 localhost systemd[1]: Reached target User and Group Name Lookups.

● paths.target - Path Units
     Loaded: loaded (/usr/lib/systemd/system/paths.target; static)
     Active: active since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:systemd.special(7)

○ remote-cryptsetup.target - Remote Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-cryptsetup.target; disabled; preset: enabled)
Unit syslog.target could not be found.
     Active: inactive (dead) since Mon 2026-02-02 10:52:37 UTC; 1h 32min ago
       Docs: man:systemd.special(7)

Feb 02 10:52:37 localhost systemd[1]: Reached target Remote Encrypted Volumes.
Feb 02 10:52:37 localhost systemd[1]: Stopped target Remote Encrypted Volumes.

● remote-fs-pre.target - Preparation for Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs-pre.target; static)
     Active: active since Mon 2026-02-02 10:52:41 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:41 UTC; 1h 32min ago
       Docs: man:systemd.special(7)

Feb 02 10:52:41 np0005604943.novalocal systemd[1]: Reached target Preparation for Remote File Systems.

● remote-fs.target - Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs.target; enabled; preset: enabled)
     Active: active since Mon 2026-02-02 10:52:41 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:41 UTC; 1h 32min ago
       Docs: man:systemd.special(7)

Feb 02 10:52:41 np0005604943.novalocal systemd[1]: Reached target Remote File Systems.

○ remote-veritysetup.target - Remote Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-veritysetup.target; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ rescue.target - Rescue Mode
     Loaded: loaded (/usr/lib/systemd/system/rescue.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● rpc_pipefs.target
     Loaded: loaded (/usr/lib/systemd/system/rpc_pipefs.target; static)
     Active: active since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago

● rpcbind.target - RPC Port Mapper
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.target; static)
     Active: active since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:systemd.special(7)

○ shutdown.target - System Shutdown
     Loaded: loaded (/usr/lib/systemd/system/shutdown.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● slices.target - Slice Units
     Loaded: loaded (/usr/lib/systemd/system/slices.target; static)
     Active: active since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:systemd.special(7)

● sockets.target - Socket Units
     Loaded: loaded (/usr/lib/systemd/system/sockets.target; static)
     Active: active since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
       Docs: man:systemd.special(7)

Feb 02 10:52:39 localhost systemd[1]: Reached target Socket Units.

● sshd-keygen.target
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen.target; static)
     Active: active since Mon 2026-02-02 11:43:26 UTC; 41min ago
      Until: Mon 2026-02-02 11:43:26 UTC; 41min ago

Feb 02 11:43:26 compute-0 systemd[1]: Reached target sshd-keygen.target.

● swap.target - Swaps
     Loaded: loaded (/usr/lib/systemd/system/swap.target; static)
     Active: active since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:systemd.special(7)

● sysinit.target - System Initialization
     Loaded: loaded (/usr/lib/systemd/system/sysinit.target; static)
     Active: active since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
       Docs: man:systemd.special(7)

Feb 02 10:52:39 localhost systemd[1]: Reached target System Initialization.

● time-set.target - System Time Set
     Loaded: loaded (/usr/lib/systemd/system/time-set.target; static)
     Active: active since Mon 2026-02-02 11:30:49 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:49 UTC; 54min ago
       Docs: man:systemd.special(7)

Feb 02 11:30:49 compute-0 systemd[1]: Reached target System Time Set.

● time-sync.target - System Time Synchronized
     Loaded: loaded (/usr/lib/systemd/system/time-sync.target; static)
     Active: active since Mon 2026-02-02 11:30:49 UTC; 54min ago
      Until: Mon 2026-02-02 11:30:49 UTC; 54min ago
       Docs: man:systemd.special(7)

Feb 02 11:30:49 compute-0 systemd[1]: Reached target System Time Synchronized.

● timers.target - Timer Units
     Loaded: loaded (/usr/lib/systemd/system/timers.target; static)
     Active: active since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
       Docs: man:systemd.special(7)

Feb 02 10:52:39 localhost systemd[1]: Reached target Timer Units.

○ umount.target - Unmount All Filesystems
     Loaded: loaded (/usr/lib/systemd/system/umount.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ veritysetup-pre.target - Local Verity Protected Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/veritysetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● veritysetup.target - Local Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/veritysetup.target; static)
     Active: active since Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:38 UTC; 1h 32min ago
       Docs: man:systemd.special(7)

○ virt-guest-shutdown.target - Libvirt guests shutdown
     Loaded: loaded (/etc/systemd/system/virt-guest-shutdown.target; static)
     Active: inactive (dead)
       Docs: https://libvirt.org

● dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059-1f21a3646628ee04.timer - /usr/bin/podman healthcheck run dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059
     Loaded: loaded (/run/systemd/transient/dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059-1f21a3646628ee04.timer; transient)
  Transient: yes
     Active: active (waiting) since Mon 2026-02-02 11:40:08 UTC; 44min ago
      Until: Mon 2026-02-02 11:40:08 UTC; 44min ago
    Trigger: Mon 2026-02-02 12:25:15 UTC; 23s left
   Triggers: ● dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059-1f21a3646628ee04.service

Feb 02 11:40:08 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run dd42911bf41e885c3ba4077012f09a191888946e3867784418685a91ce34a059.

● dnf-makecache.timer - dnf makecache --timer
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.timer; enabled; preset: enabled)
     Active: active (waiting) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
    Trigger: Mon 2026-02-02 13:02:36 UTC; 37min left
   Triggers: ● dnf-makecache.service

Feb 02 10:52:39 localhost systemd[1]: Started dnf makecache --timer.

● f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9-30b77556ff7903ab.timer - /usr/bin/podman healthcheck run f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9
     Loaded: loaded (/run/systemd/transient/f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9-30b77556ff7903ab.timer; transient)
  Transient: yes
     Active: active (waiting) since Mon 2026-02-02 11:41:07 UTC; 43min ago
      Until: Mon 2026-02-02 11:41:07 UTC; 43min ago
    Trigger: Mon 2026-02-02 12:25:15 UTC; 23s left
   Triggers: ● f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9-30b77556ff7903ab.service

Feb 02 11:41:07 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run f7a8a7d56deab4622312f47586dba2f4884d78e46b23bfb226b684327aab18c9.

● logrotate.timer - Daily rotation of log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.timer; enabled; preset: enabled)
     Active: active (waiting) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
    Trigger: Tue 2026-02-03 00:00:00 UTC; 11h left
   Triggers: ● logrotate.service
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

Feb 02 10:52:39 localhost systemd[1]: Started Daily rotation of log files.

○ raid-check.timer - Weekly RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.timer; enabled; preset: enabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● raid-check.service

○ sysstat-collect.timer - Run system activity accounting tool every 10 minutes
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-collect.service

○ sysstat-summary.timer - Generate summary of yesterday's process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-summary.service

● systemd-tmpfiles-clean.timer - Daily Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.timer; static)
     Active: active (waiting) since Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
      Until: Mon 2026-02-02 10:52:39 UTC; 1h 32min ago
    Trigger: Tue 2026-02-03 11:07:52 UTC; 22h left
   Triggers: ● systemd-tmpfiles-clean.service
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)

Feb 02 10:52:39 localhost systemd[1]: Started Daily Cleanup of Temporary Directories.

● unbound-anchor.timer - daily update of the root trust anchor for DNSSEC
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.timer; enabled; preset: enabled)
     Active: active (waiting) since Mon 2026-02-02 11:26:38 UTC; 58min ago
      Until: Mon 2026-02-02 11:26:38 UTC; 58min ago
    Trigger: Tue 2026-02-03 00:00:00 UTC; 11h left
   Triggers: ● unbound-anchor.service
       Docs: man:unbound-anchor(8)

Feb 02 11:26:38 compute-0 systemd[1]: Started daily update of the root trust anchor for DNSSEC.
