● compute-0
    State: running
    Units: 475 loaded (incl. loaded aliases)
     Jobs: 0 queued
   Failed: 0 units
    Since: Sat 2025-12-13 06:42:09 UTC; 54min ago
  systemd: 252-59.el9
   CGroup: /
           ├─252669 turbostat --debug sleep 10
           ├─252673 sleep 10
           ├─init.scope
           │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
           ├─machine.slice
           │ ├─libpod-1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07.scope
           │ │ └─container
           │ │   ├─154118 dumb-init --single-child -- kolla_start
           │ │   ├─154121 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │ │   ├─154224 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │ │   └─154229 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpj55srbhp/privsep.sock
           │ ├─libpod-85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306.scope
           │ │ └─container
           │ │   ├─241224 dumb-init --single-child -- kolla_start
           │ │   └─241226 /usr/bin/python3 /usr/bin/nova-compute
           │ ├─libpod-d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed.scope
           │ │ └─container
           │ │   ├─144649 dumb-init --single-child -- kolla_start
           │ │   └─144652 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
           │ └─libpod-f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6.scope
           │   └─container
           │     ├─223788 dumb-init --single-child -- kolla_start
           │     └─223792 /usr/sbin/multipathd -d
           ├─system.slice
           │ ├─NetworkManager.service
           │ │ └─48896 /usr/sbin/NetworkManager --no-daemon
           │ ├─auditd.service
           │ │ ├─673 /sbin/auditd
           │ │ └─675 /usr/sbin/sedispatch
           │ ├─chronyd.service
           │ │ └─58459 /usr/sbin/chronyd -F 2
           │ ├─crond.service
           │ │ ├─  968 /usr/sbin/crond -n
           │ │ └─30916 /usr/sbin/anacron -s
           │ ├─dbus-broker.service
           │ │ ├─727 /usr/bin/dbus-broker-launch --scope system --audit
           │ │ └─734 dbus-broker --log 4 --controller 9 --machine-id 64f1d6692049d8be5e8b216cc203502c --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
           │ ├─edpm_multipathd.service
           │ │ └─223786 /usr/bin/conmon --api-version 1 -c f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6 -u f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6/userdata -p /run/containers/storage/overlay-containers/f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6/userdata/pidfile -n multipathd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6/userdata/oci-log --conmon-pidfile /run/multipathd.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6
           │ ├─edpm_nova_compute.service
           │ │ └─241222 /usr/bin/conmon --api-version 1 -c 85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306 -u 85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306/userdata -p /run/containers/storage/overlay-containers/85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg 85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306
           │ ├─edpm_ovn_controller.service
           │ │ └─144647 /usr/bin/conmon --api-version 1 -c d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed -u d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed/userdata -p /run/containers/storage/overlay-containers/d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed
           │ ├─edpm_ovn_metadata_agent.service
           │ │ └─154116 /usr/bin/conmon --api-version 1 -c 1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07 -u 1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07/userdata -p /run/containers/storage/overlay-containers/1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg 1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07
           │ ├─gssproxy.service
           │ │ └─827 /usr/sbin/gssproxy -D
           │ ├─irqbalance.service
           │ │ └─740 /usr/sbin/irqbalance
           │ ├─iscsid.service
           │ │ └─226492 /usr/sbin/iscsid -f
           │ ├─ovs-vswitchd.service
           │ │ └─47201 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
           │ ├─ovsdb-server.service
           │ │ └─47120 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
           │ ├─polkit.service
           │ │ └─43388 /usr/lib/polkit-1/polkitd --no-debug
           │ ├─rpcbind.service
           │ │ └─671 /usr/bin/rpcbind -w -f
           │ ├─rsyslog.service
           │ │ └─962 /usr/sbin/rsyslogd -n
           │ ├─sshd.service
           │ │ └─179217 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
           │ ├─system-ceph\x2d00fdae1b\x2d7fad\x2d5f1b\x2d8734\x2dba4d9298a6de.slice
           │ │ ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@crash.compute-0.service
           │ │ │ ├─libpod-payload-8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc
           │ │ │ │ ├─79703 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
           │ │ │ │ └─79705 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
           │ │ │ └─runtime
           │ │ │   └─79701 /usr/bin/conmon --api-version 1 -c 8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc -u 8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc/userdata -p /run/containers/storage/overlay-containers/8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc
           │ │ ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mds.cephfs.compute-0.zwnyoz.service
           │ │ │ ├─libpod-payload-c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50
           │ │ │ │ ├─93862 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.zwnyoz -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─93864 /usr/bin/ceph-mds -n mds.cephfs.compute-0.zwnyoz -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─93860 /usr/bin/conmon --api-version 1 -c c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50 -u c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50/userdata -p /run/containers/storage/overlay-containers/c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-mds-cephfs-compute-0-zwnyoz --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mds.cephfs.compute-0.zwnyoz.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50
           │ │ ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mgr.compute-0.qsherl.service
           │ │ │ ├─libpod-payload-4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283
           │ │ │ │ ├─75198 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.qsherl -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─75200 /usr/bin/ceph-mgr -n mgr.compute-0.qsherl -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─75196 /usr/bin/conmon --api-version 1 -c 4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283 -u 4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283/userdata -p /run/containers/storage/overlay-containers/4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-mgr-compute-0-qsherl --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mgr.compute-0.qsherl.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283
           │ │ ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mon.compute-0.service
           │ │ │ ├─libpod-payload-4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a
           │ │ │ │ ├─74926 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ │ └─74928 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─74924 /usr/bin/conmon --api-version 1 -c 4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a -u 4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a/userdata -p /run/containers/storage/overlay-containers/4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a
           │ │ ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.0.service
           │ │ │ ├─libpod-payload-5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7
           │ │ │ │ ├─85138 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
           │ │ │ │ └─85140 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
           │ │ │ └─runtime
           │ │ │   └─85136 /usr/bin/conmon --api-version 1 -c 5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7 -u 5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7/userdata -p /run/containers/storage/overlay-containers/5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7
           │ │ ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.1.service
           │ │ │ ├─libpod-payload-c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03
           │ │ │ │ ├─86140 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
           │ │ │ │ └─86142 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
           │ │ │ └─runtime
           │ │ │   └─86138 /usr/bin/conmon --api-version 1 -c c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03 -u c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03/userdata -p /run/containers/storage/overlay-containers/c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03
           │ │ ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.2.service
           │ │ │ ├─libpod-payload-bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a
           │ │ │ │ ├─87153 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
           │ │ │ │ └─87155 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
           │ │ │ └─runtime
           │ │ │   └─87151 /usr/bin/conmon --api-version 1 -c bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a -u bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a/userdata -p /run/containers/storage/overlay-containers/bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a
           │ │ └─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@rgw.rgw.compute-0.kikquh.service
           │ │   ├─libpod-payload-69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c
           │ │   │ ├─93474 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.kikquh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
           │ │   │ └─93487 /usr/bin/radosgw -n client.rgw.rgw.compute-0.kikquh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
           │ │   └─runtime
           │ │     └─93465 /usr/bin/conmon --api-version 1 -c 69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c -u 69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c/userdata -p /run/containers/storage/overlay-containers/69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-rgw-rgw-compute-0-kikquh --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@rgw.rgw.compute-0.kikquh.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c
           │ ├─system-getty.slice
           │ │ └─getty@tty1.service
           │ │   └─978 /sbin/agetty -o "-p -- \\u" --noclear - linux
           │ ├─system-serial\x2dgetty.slice
           │ │ └─serial-getty@ttyS0.service
           │ │   └─984 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
           │ ├─systemd-hostnamed.service
           │ │ └─249805 /usr/lib/systemd/systemd-hostnamed
           │ ├─systemd-journald.service
           │ │ └─650 /usr/lib/systemd/systemd-journald
           │ ├─systemd-logind.service
           │ │ └─745 /usr/lib/systemd/systemd-logind
           │ ├─systemd-machined.service
           │ │ └─204630 /usr/lib/systemd/systemd-machined
           │ ├─systemd-udevd.service
           │ │ └─udev
           │ │   └─701 /usr/lib/systemd/systemd-udevd
           │ ├─tuned.service
           │ │ └─105788 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
           │ ├─virtlogd.service
           │ │ └─204000 /usr/sbin/virtlogd
           │ ├─virtnodedevd.service
           │ │ └─240159 /usr/sbin/virtnodedevd --timeout 120
           │ └─virtqemud.service
           │   └─241006 /usr/sbin/virtqemud --timeout 120
           └─user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4579 /usr/bin/python3
             │ ├─session-54.scope
             │ │ ├─246077 "sshd-session: zuul [priv]"
             │ │ ├─246080 "sshd-session: zuul@notty"
             │ │ ├─246081 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─246105 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─252668 timeout 15s turbostat --debug sleep 10
             │ │ ├─253118 timeout 300s ceph osd numa-status --format json-pretty
             │ │ ├─253119 /usr/bin/python3 -s /usr/bin/ceph osd numa-status --format json-pretty
             │ │ ├─253139 timeout 300s semanage module -l
             │ │ ├─253140 /usr/bin/python3 -EsI /usr/sbin/semanage module -l
             │ │ ├─253143 timeout 300s systemctl status --all
             │ │ └─253144 systemctl status --all
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─14044 /usr/bin/dbus-broker-launch --scope user
             │   │   └─14045 dbus-broker --log 4 --controller 9 --machine-id 64f1d6692049d8be5e8b216cc203502c --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4373 /usr/lib/systemd/systemd --user
             │   │ └─4375 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-1c7804ac.scope
             │       └─14027 catatonit -P
             └─user-42477.slice
               ├─session-20.scope
               │ ├─76206 "sshd-session: ceph-admin [priv]"
               │ └─76226 "sshd-session: ceph-admin"
               ├─session-22.scope
               │ ├─76223 "sshd-session: ceph-admin [priv]"
               │ └─76228 "sshd-session: ceph-admin@notty"
               ├─session-23.scope
               │ ├─76254 "sshd-session: ceph-admin [priv]"
               │ └─76257 "sshd-session: ceph-admin@notty"
               ├─session-24.scope
               │ ├─76283 "sshd-session: ceph-admin [priv]"
               │ └─76286 "sshd-session: ceph-admin@notty"
               ├─session-25.scope
               │ ├─76312 "sshd-session: ceph-admin [priv]"
               │ └─76315 "sshd-session: ceph-admin@notty"
               ├─session-26.scope
               │ ├─76341 "sshd-session: ceph-admin [priv]"
               │ └─76344 "sshd-session: ceph-admin@notty"
               ├─session-27.scope
               │ ├─76370 "sshd-session: ceph-admin [priv]"
               │ └─76373 "sshd-session: ceph-admin@notty"
               ├─session-28.scope
               │ ├─76399 "sshd-session: ceph-admin [priv]"
               │ └─76402 "sshd-session: ceph-admin@notty"
               ├─session-29.scope
Unit boot.automount could not be found.
               │ ├─76428 "sshd-session: ceph-admin [priv]"
               │ └─76431 "sshd-session: ceph-admin@notty"
               ├─session-30.scope
               │ ├─76457 "sshd-session: ceph-admin [priv]"
               │ └─76460 "sshd-session: ceph-admin@notty"
               ├─session-31.scope
               │ ├─76484 "sshd-session: ceph-admin [priv]"
               │ └─76487 "sshd-session: ceph-admin@notty"
               ├─session-32.scope
               │ ├─76513 "sshd-session: ceph-admin [priv]"
               │ └─76516 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─76210 /usr/lib/systemd/systemd --user
                   └─76212 "(sd-pam)"

● proc-sys-fs-binfmt_misc.automount - Arbitrary Executable File Formats File System Automount Point
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.automount; static)
     Active: active (running) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
   Triggers: ● proc-sys-fs-binfmt_misc.mount
      Where: /proc/sys/fs/binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Dec 13 07:13:49 compute-0 systemd[1]: proc-sys-fs-binfmt_misc.automount: Got automount request for /proc/sys/fs/binfmt_misc, triggered by 77212 (sysctl)

● dev-cdrom.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:1f.2-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/pci0000:00/0000:00:1f.2/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ceph_vg0-ceph_lv0.device - /dev/ceph_vg0/ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:37 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:37 UTC; 24min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-ceph_vg1-ceph_lv1.device - /dev/ceph_vg1/ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:41 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:41 UTC; 24min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-ceph_vg2-ceph_lv2.device - /dev/ceph_vg2/ceph_lv2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:44 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:44 UTC; 24min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2ddiskseq-1.device - /dev/disk/by-diskseq/1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:02.2-0000:04:00.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/pci0000:00/0000:00:02.2/0000:04:00.0/virtio2/block/vda

● dev-disk-by\x2ddiskseq-11.device - /dev/disk/by-diskseq/11
    Follows: unit currently follows state of sys-devices-virtual-block-loop5.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:44 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:44 UTC; 24min ago
     Device: /sys/devices/virtual/block/loop5

● dev-disk-by\x2ddiskseq-12.device - /dev/disk/by-diskseq/12
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:44 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:44 UTC; 24min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2ddiskseq-3.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:1f.2-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/pci0000:00/0000:00:1f.2/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2ddiskseq-5.device - /dev/disk/by-diskseq/5
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:37 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:37 UTC; 24min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2ddiskseq-6.device - /dev/disk/by-diskseq/6
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:37 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:37 UTC; 24min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2ddiskseq-8.device - /dev/disk/by-diskseq/8
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:40 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:40 UTC; 24min ago
     Device: /sys/devices/virtual/block/loop4

● dev-disk-by\x2ddiskseq-9.device - /dev/disk/by-diskseq/9
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:41 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:41 UTC; 24min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-ata\x2dQEMU_DVD\x2dROM_QM00001.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:1f.2-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/pci0000:00/0000:00:1f.2/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg0\x2dceph_lv0.device - /dev/disk/by-id/dm-name-ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:37 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:37 UTC; 24min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg1\x2dceph_lv1.device - /dev/disk/by-id/dm-name-ceph_vg1-ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:41 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:41 UTC; 24min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg2\x2dceph_lv2.device - /dev/disk/by-id/dm-name-ceph_vg2-ceph_lv2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:44 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:44 UTC; 24min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2dGsDItVgWRkPm0kLkXMuCd3ck4O27a0cZzr2qgUONrzEb12bE6w1lGGzMad2N6AE1.device - /dev/disk/by-id/dm-uuid-LVM-GsDItVgWRkPm0kLkXMuCd3ck4O27a0cZzr2qgUONrzEb12bE6w1lGGzMad2N6AE1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:44 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:44 UTC; 24min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2dse4GyIrdpzvHS4clzb2AvwfsVLpfklRbtkUjW1SaXQbDe00Tiu0HCukU4umCyikh.device - /dev/disk/by-id/dm-uuid-LVM-se4GyIrdpzvHS4clzb2AvwfsVLpfklRbtkUjW1SaXQbDe00Tiu0HCukU4umCyikh
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:37 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:37 UTC; 24min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2dtJJ6QMblzEf3afKCh8rCifbMenVxYk54KJfBHDbTeIsse3f5ptwVe51jqy9DmfqM.device - /dev/disk/by-id/dm-uuid-LVM-tJJ6QMblzEf3afKCh8rCifbMenVxYk54KJfBHDbTeIsse3f5ptwVe51jqy9DmfqM
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:41 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:41 UTC; 24min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dJZuWcj\x2dz4Qp\x2dM8sA\x2dO1Bs\x2dY6Jo\x2dut6A\x2ddGLJa7.device - /dev/disk/by-id/lvm-pv-uuid-JZuWcj-z4Qp-M8sA-O1Bs-Y6Jo-ut6A-dGLJa7
    Follows: unit currently follows state of sys-devices-virtual-block-loop5.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:44 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:44 UTC; 24min ago
     Device: /sys/devices/virtual/block/loop5

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dPcv6dX\x2dqo6N\x2dBf0e\x2dtaEC\x2d2LLr\x2d9URo\x2dEfG9Rw.device - /dev/disk/by-id/lvm-pv-uuid-Pcv6dX-qo6N-Bf0e-taEC-2LLr-9URo-EfG9Rw
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:41 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:41 UTC; 24min ago
     Device: /sys/devices/virtual/block/loop4

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dqY4dLl\x2doOEd\x2dCMwN\x2dwdmJ\x2d4hcU\x2d4ytc\x2dPTunEc.device - /dev/disk/by-id/lvm-pv-uuid-qY4dLl-oOEd-CMwN-wdmJ-4hcU-4ytc-PTunEc
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:37 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:37 UTC; 24min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2dlabel-config\x2d2.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:1f.2-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/pci0000:00/0000:00:1f.2/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpartuuid-8c19d41b\x2d01.device - /dev/disk/by-partuuid/8c19d41b-01
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:02.2-0000:04:00.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/pci0000:00/0000:00:02.2/0000:04:00.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-pci\x2d0000:00:1f.2\x2data\x2d1.0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:1f.2-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/pci0000:00/0000:00:1f.2/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:1f.2\x2data\x2d1.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:1f.2-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/pci0000:00/0000:00:1f.2/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:04:00.0.device - /dev/disk/by-path/pci-0000:04:00.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:02.2-0000:04:00.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/pci0000:00/0000:00:02.2/0000:04:00.0/virtio2/block/vda

● dev-disk-by\x2dpath-pci\x2d0000:04:00.0\x2dpart1.device - /dev/disk/by-path/pci-0000:04:00.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:02.2-0000:04:00.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/pci0000:00/0000:00:02.2/0000:04:00.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:04:00.0.device - /dev/disk/by-path/virtio-pci-0000:04:00.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:02.2-0000:04:00.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/pci0000:00/0000:00:02.2/0000:04:00.0/virtio2/block/vda

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:04:00.0\x2dpart1.device - /dev/disk/by-path/virtio-pci-0000:04:00.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:02.2-0000:04:00.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/pci0000:00/0000:00:02.2/0000:04:00.0/virtio2/block/vda/vda1

● dev-disk-by\x2duuid-2025\x2d12\x2d13\x2d06\x2d42\x2d01\x2d00.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:1f.2-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/pci0000:00/0000:00:1f.2/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2duuid-cbdedf45\x2ded1d\x2d4952\x2d82a8\x2d33a12c0ba266.device - /dev/disk/by-uuid/cbdedf45-ed1d-4952-82a8-33a12c0ba266
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:02.2-0000:04:00.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/pci0000:00/0000:00:02.2/0000:04:00.0/virtio2/block/vda/vda1

Dec 13 06:42:08 localhost systemd[1]: Found device /dev/disk/by-uuid/cbdedf45-ed1d-4952-82a8-33a12c0ba266.

● dev-dm\x2d0.device - /dev/dm-0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:37 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:37 UTC; 24min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-dm\x2d1.device - /dev/dm-1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:41 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:41 UTC; 24min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-dm\x2d2.device - /dev/dm-2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:44 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:44 UTC; 24min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-loop3.device - /dev/loop3
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:37 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:37 UTC; 24min ago
     Device: /sys/devices/virtual/block/loop3

● dev-loop4.device - /dev/loop4
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:40 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:40 UTC; 24min ago
     Device: /sys/devices/virtual/block/loop4

● dev-loop5.device - /dev/loop5
    Follows: unit currently follows state of sys-devices-virtual-block-loop5.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:44 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:44 UTC; 24min ago
     Device: /sys/devices/virtual/block/loop5

● dev-mapper-ceph_vg0\x2dceph_lv0.device - /dev/mapper/ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:37 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:37 UTC; 24min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-mapper-ceph_vg1\x2dceph_lv1.device - /dev/mapper/ceph_vg1-ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:41 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:41 UTC; 24min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-mapper-ceph_vg2\x2dceph_lv2.device - /dev/mapper/ceph_vg2-ceph_lv2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:44 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:44 UTC; 24min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-rfkill.device - /dev/rfkill
    Follows: unit currently follows state of sys-devices-virtual-misc-rfkill.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:11 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:11 UTC; 54min ago
     Device: /sys/devices/virtual/misc/rfkill

● dev-sr0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:1f.2-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/pci0000:00/0000:00:1f.2/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ttyS0.device - /dev/ttyS0
    Follows: unit currently follows state of sys-devices-pnp0-00:00-tty-ttyS0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

Dec 13 06:42:10 localhost systemd[1]: Condition check resulted in /dev/ttyS0 being skipped.

● dev-ttyS1.device - /dev/ttyS1
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● dev-ttyS2.device - /dev/ttyS2
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS2.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● dev-ttyS3.device - /dev/ttyS3
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS3.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● dev-vda.device - /dev/vda
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:02.2-0000:04:00.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/pci0000:00/0000:00:02.2/0000:04:00.0/virtio2/block/vda

● dev-vda1.device - /dev/vda1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:02.2-0000:04:00.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/pci0000:00/0000:00:02.2/0000:04:00.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:02.1-0000:03:00.0-virtio1-net-eth0.device - Virtio 1.0 network device (QEMU)
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/pci0000:00/0000:00:02.1/0000:03:00.0/virtio1/net/eth0

● sys-devices-pci0000:00-0000:00:02.2-0000:04:00.0-virtio2-block-vda-vda1.device - /sys/devices/pci0000:00/0000:00:02.2/0000:04:00.0/virtio2/block/vda/vda1
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/pci0000:00/0000:00:02.2/0000:04:00.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:02.2-0000:04:00.0-virtio2-block-vda.device - /sys/devices/pci0000:00/0000:00:02.2/0000:04:00.0/virtio2/block/vda
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/pci0000:00/0000:00:02.2/0000:04:00.0/virtio2/block/vda

● sys-devices-pci0000:00-0000:00:02.5-0000:07:00.0-virtio5-net-eth1.device - Virtio 1.0 network device (QEMU)
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:44:03 UTC; 52min ago
      Until: Sat 2025-12-13 06:44:03 UTC; 52min ago
     Device: /sys/devices/pci0000:00/0000:00:02.5/0000:07:00.0/virtio5/net/eth1

● sys-devices-pci0000:00-0000:00:1f.2-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device - QEMU_DVD-ROM config-2
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/pci0000:00/0000:00:1f.2/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● sys-devices-platform-serial8250-tty-ttyS1.device - /sys/devices/platform/serial8250/tty/ttyS1
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● sys-devices-platform-serial8250-tty-ttyS2.device - /sys/devices/platform/serial8250/tty/ttyS2
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● sys-devices-platform-serial8250-tty-ttyS3.device - /sys/devices/platform/serial8250/tty/ttyS3
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● sys-devices-pnp0-00:00-tty-ttyS0.device - /sys/devices/pnp0/00:00/tty/ttyS0
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

● sys-devices-virtual-block-dm\x2d0.device - /sys/devices/virtual/block/dm-0
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:37 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:37 UTC; 24min ago
     Device: /sys/devices/virtual/block/dm-0

● sys-devices-virtual-block-dm\x2d1.device - /sys/devices/virtual/block/dm-1
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:41 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:41 UTC; 24min ago
     Device: /sys/devices/virtual/block/dm-1

● sys-devices-virtual-block-dm\x2d2.device - /sys/devices/virtual/block/dm-2
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:44 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:44 UTC; 24min ago
     Device: /sys/devices/virtual/block/dm-2

● sys-devices-virtual-block-loop3.device - /sys/devices/virtual/block/loop3
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:37 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:37 UTC; 24min ago
     Device: /sys/devices/virtual/block/loop3

● sys-devices-virtual-block-loop4.device - /sys/devices/virtual/block/loop4
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:40 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:40 UTC; 24min ago
     Device: /sys/devices/virtual/block/loop4

● sys-devices-virtual-block-loop5.device - /sys/devices/virtual/block/loop5
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:12:44 UTC; 24min ago
      Until: Sat 2025-12-13 07:12:44 UTC; 24min ago
     Device: /sys/devices/virtual/block/loop5

● sys-devices-virtual-misc-rfkill.device - /sys/devices/virtual/misc/rfkill
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:11 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:11 UTC; 54min ago
     Device: /sys/devices/virtual/misc/rfkill

● sys-devices-virtual-net-br\x2dex.device - /sys/devices/virtual/net/br-ex
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:10:17 UTC; 26min ago
      Until: Sat 2025-12-13 07:10:17 UTC; 26min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-devices-virtual-net-br\x2dint.device - /sys/devices/virtual/net/br-int
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:22:49 UTC; 14min ago
      Until: Sat 2025-12-13 07:22:49 UTC; 14min ago
     Device: /sys/devices/virtual/net/br-int

● sys-devices-virtual-net-genev_sys_6081.device - /sys/devices/virtual/net/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:22:49 UTC; 14min ago
      Until: Sat 2025-12-13 07:22:49 UTC; 14min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-devices-virtual-net-ovs\x2dsystem.device - /sys/devices/virtual/net/ovs-system
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:10:17 UTC; 26min ago
      Until: Sat 2025-12-13 07:10:17 UTC; 26min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-devices-virtual-net-vlan20.device - /sys/devices/virtual/net/vlan20
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:10:17 UTC; 26min ago
      Until: Sat 2025-12-13 07:10:17 UTC; 26min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-devices-virtual-net-vlan21.device - /sys/devices/virtual/net/vlan21
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:10:17 UTC; 26min ago
      Until: Sat 2025-12-13 07:10:17 UTC; 26min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-devices-virtual-net-vlan22.device - /sys/devices/virtual/net/vlan22
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:10:17 UTC; 26min ago
      Until: Sat 2025-12-13 07:10:17 UTC; 26min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-devices-virtual-net-vlan23.device - /sys/devices/virtual/net/vlan23
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:10:17 UTC; 26min ago
      Until: Sat 2025-12-13 07:10:17 UTC; 26min ago
     Device: /sys/devices/virtual/net/vlan23

● sys-module-configfs.device - /sys/module/configfs
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/module/configfs

● sys-module-fuse.device - /sys/module/fuse
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/module/fuse

● sys-subsystem-net-devices-br\x2dex.device - /sys/subsystem/net/devices/br-ex
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:10:17 UTC; 26min ago
      Until: Sat 2025-12-13 07:10:17 UTC; 26min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-subsystem-net-devices-br\x2dint.device - /sys/subsystem/net/devices/br-int
     Loaded: loaded
Unit boot.mount could not be found.
     Active: active (plugged) since Sat 2025-12-13 07:22:49 UTC; 14min ago
      Until: Sat 2025-12-13 07:22:49 UTC; 14min ago
     Device: /sys/devices/virtual/net/br-int

● sys-subsystem-net-devices-eth0.device - Virtio 1.0 network device (QEMU)
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
     Device: /sys/devices/pci0000:00/0000:00:02.1/0000:03:00.0/virtio1/net/eth0

● sys-subsystem-net-devices-eth1.device - Virtio 1.0 network device (QEMU)
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 06:44:03 UTC; 52min ago
      Until: Sat 2025-12-13 06:44:03 UTC; 52min ago
     Device: /sys/devices/pci0000:00/0000:00:02.5/0000:07:00.0/virtio5/net/eth1

● sys-subsystem-net-devices-genev_sys_6081.device - /sys/subsystem/net/devices/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:22:49 UTC; 14min ago
      Until: Sat 2025-12-13 07:22:49 UTC; 14min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-subsystem-net-devices-ovs\x2dsystem.device - /sys/subsystem/net/devices/ovs-system
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:10:17 UTC; 26min ago
      Until: Sat 2025-12-13 07:10:17 UTC; 26min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-subsystem-net-devices-vlan20.device - /sys/subsystem/net/devices/vlan20
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:10:17 UTC; 26min ago
      Until: Sat 2025-12-13 07:10:17 UTC; 26min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-subsystem-net-devices-vlan21.device - /sys/subsystem/net/devices/vlan21
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:10:17 UTC; 26min ago
      Until: Sat 2025-12-13 07:10:17 UTC; 26min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-subsystem-net-devices-vlan22.device - /sys/subsystem/net/devices/vlan22
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:10:17 UTC; 26min ago
      Until: Sat 2025-12-13 07:10:17 UTC; 26min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-subsystem-net-devices-vlan23.device - /sys/subsystem/net/devices/vlan23
     Loaded: loaded
     Active: active (plugged) since Sat 2025-12-13 07:10:17 UTC; 26min ago
      Until: Sat 2025-12-13 07:10:17 UTC; 26min ago
     Device: /sys/devices/virtual/net/vlan23

● -.mount - Root Mount
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted)
      Where: /
       What: /dev/vda1
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages.mount - Huge Pages File System
     Loaded: loaded (/usr/lib/systemd/system/dev-hugepages.mount; static)
     Active: active (mounted) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
      Where: /dev/hugepages
       What: hugetlbfs
       Docs: https://docs.kernel.org/admin-guide/mm/hugetlbpage.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48568)
     Memory: 52.0K (peak: 564.0K)
        CPU: 5ms
     CGroup: /dev-hugepages.mount

Dec 13 06:42:10 localhost systemd[1]: Mounted Huge Pages File System.

● dev-hugepages1G.mount - /dev/hugepages1G
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Sat 2025-12-13 07:11:52 UTC; 24min ago
      Until: Sat 2025-12-13 07:11:52 UTC; 24min ago
      Where: /dev/hugepages1G
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages2M.mount - /dev/hugepages2M
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Sat 2025-12-13 07:11:53 UTC; 24min ago
      Until: Sat 2025-12-13 07:11:53 UTC; 24min ago
      Where: /dev/hugepages2M
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-mqueue.mount - POSIX Message Queue File System
Unit home.mount could not be found.
     Loaded: loaded (/usr/lib/systemd/system/dev-mqueue.mount; static)
     Active: active (mounted) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
      Where: /dev/mqueue
       What: mqueue
       Docs: man:mq_overview(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48568)
     Memory: 4.0K (peak: 552.0K)
        CPU: 2ms
     CGroup: /dev-mqueue.mount

Dec 13 06:42:10 localhost systemd[1]: Mounted POSIX Message Queue File System.

○ proc-fs-nfsd.mount - NFSD configuration filesystem
     Loaded: loaded (/usr/lib/systemd/system/proc-fs-nfsd.mount; static)
     Active: inactive (dead)
      Where: /proc/fs/nfsd
       What: nfsd

● proc-sys-fs-binfmt_misc.mount - Arbitrary Executable File Formats File System
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.mount; disabled; preset: disabled)
     Active: active (mounted) since Sat 2025-12-13 07:13:49 UTC; 23min ago
      Until: Sat 2025-12-13 07:13:49 UTC; 23min ago
TriggeredBy: ● proc-sys-fs-binfmt_misc.automount
      Where: /proc/sys/fs/binfmt_misc
       What: binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48568)
     Memory: 8.0K (peak: 548.0K)
        CPU: 5ms
     CGroup: /proc-sys-fs-binfmt_misc.mount

Dec 13 07:13:49 compute-0 systemd[1]: Mounting Arbitrary Executable File Formats File System...
Dec 13 07:13:49 compute-0 systemd[1]: Mounted Arbitrary Executable File Formats File System.

● run-credentials-systemd\x2dsysctl.service.mount - /run/credentials/systemd-sysctl.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 07:09:19 UTC; 27min ago
      Until: Sat 2025-12-13 07:09:19 UTC; 27min ago
      Where: /run/credentials/systemd-sysctl.service
       What: none

● run-credentials-systemd\x2dsysusers.service.mount - /run/credentials/systemd-sysusers.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
      Where: /run/credentials/systemd-sysusers.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup.service.mount - /run/credentials/systemd-tmpfiles-setup.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 06:42:11 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:11 UTC; 54min ago
      Where: /run/credentials/systemd-tmpfiles-setup.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup\x2ddev.service.mount - /run/credentials/systemd-tmpfiles-setup-dev.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
      Where: /run/credentials/systemd-tmpfiles-setup-dev.service
       What: none

● run-netns.mount - /run/netns
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 07:11:30 UTC; 25min ago
      Until: Sat 2025-12-13 07:11:30 UTC; 25min ago
      Where: /run/netns
       What: tmpfs

● run-user-1000.mount - /run/user/1000
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 06:42:43 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:43 UTC; 54min ago
      Where: /run/user/1000
       What: tmpfs

● run-user-42477.mount - /run/user/42477
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 07:13:43 UTC; 23min ago
      Until: Sat 2025-12-13 07:13:43 UTC; 23min ago
      Where: /run/user/42477
       What: tmpfs

● sys-fs-fuse-connections.mount - FUSE Control File System
     Loaded: loaded (/usr/lib/systemd/system/sys-fs-fuse-connections.mount; static)
     Active: active (mounted) since Sat 2025-12-13 06:42:10 UTC; 54min ago
Unit sysroot.mount could not be found.
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
      Where: /sys/fs/fuse/connections
       What: fusectl
       Docs: https://docs.kernel.org/filesystems/fuse.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48568)
     Memory: 4.0K (peak: 548.0K)
        CPU: 2ms
     CGroup: /sys-fs-fuse-connections.mount

Dec 13 06:42:10 localhost systemd[1]: Mounting FUSE Control File System...
Dec 13 06:42:10 localhost systemd[1]: Mounted FUSE Control File System.

● sys-kernel-config.mount - Kernel Configuration File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
      Where: /sys/kernel/config
       What: configfs
       Docs: https://docs.kernel.org/filesystems/configfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● sys-kernel-debug-tracing.mount - /sys/kernel/debug/tracing
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 07:36:18 UTC; 32s ago
      Until: Sat 2025-12-13 07:36:18 UTC; 32s ago
      Where: /sys/kernel/debug/tracing
       What: tracefs

● sys-kernel-debug.mount - Kernel Debug File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-debug.mount; static)
     Active: active (mounted) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
      Where: /sys/kernel/debug
       What: debugfs
       Docs: https://docs.kernel.org/filesystems/debugfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48568)
     Memory: 4.0K (peak: 472.0K)
        CPU: 2ms
     CGroup: /sys-kernel-debug.mount

Dec 13 06:42:10 localhost systemd[1]: Mounted Kernel Debug File System.

● sys-kernel-tracing.mount - Kernel Trace File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-tracing.mount; static)
     Active: active (mounted) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
      Where: /sys/kernel/tracing
       What: tracefs
       Docs: https://docs.kernel.org/trace/ftrace.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48568)
     Memory: 4.0K (peak: 552.0K)
        CPU: 3ms
     CGroup: /sys-kernel-tracing.mount

Dec 13 06:42:10 localhost systemd[1]: Mounted Kernel Trace File System.

○ tmp.mount - Temporary Directory /tmp
     Loaded: loaded (/usr/lib/systemd/system/tmp.mount; disabled; preset: disabled)
     Active: inactive (dead)
      Where: /tmp
       What: tmpfs
       Docs: https://systemd.io/TEMPORARY_DIRECTORIES
             man:file-hierarchy(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● var-lib-containers-storage-overlay-1de23cd3ce62c8812bfbedb19351e447a14c16ae4f654415c23d9eadaf14158e-merged.mount - /var/lib/containers/storage/overlay/1de23cd3ce62c8812bfbedb19351e447a14c16ae4f654415c23d9eadaf14158e/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 07:13:22 UTC; 23min ago
      Until: Sat 2025-12-13 07:13:22 UTC; 23min ago
      Where: /var/lib/containers/storage/overlay/1de23cd3ce62c8812bfbedb19351e447a14c16ae4f654415c23d9eadaf14158e/merged
       What: overlay

● var-lib-containers-storage-overlay-346fc788cab02aea4507e4bda75119dcdb6967076b2f735cd53af7434813aca9-merged.mount - /var/lib/containers/storage/overlay/346fc788cab02aea4507e4bda75119dcdb6967076b2f735cd53af7434813aca9/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 07:13:23 UTC; 23min ago
      Until: Sat 2025-12-13 07:13:23 UTC; 23min ago
      Where: /var/lib/containers/storage/overlay/346fc788cab02aea4507e4bda75119dcdb6967076b2f735cd53af7434813aca9/merged
       What: overlay

● var-lib-containers-storage-overlay-3897b70dc7cab4fb8705dd286c4e00c11f6f6071eb2e509679c4e4e93e2d82ee-merged.mount - /var/lib/containers/storage/overlay/3897b70dc7cab4fb8705dd286c4e00c11f6f6071eb2e509679c4e4e93e2d82ee/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 07:13:55 UTC; 22min ago
      Until: Sat 2025-12-13 07:13:55 UTC; 22min ago
      Where: /var/lib/containers/storage/overlay/3897b70dc7cab4fb8705dd286c4e00c11f6f6071eb2e509679c4e4e93e2d82ee/merged
       What: overlay

● var-lib-containers-storage-overlay-49e51303f4b9f368d744b6dadce3bbf2364b12d9f150d990d2abdc488ca47952-merged.mount - /var/lib/containers/storage/overlay/49e51303f4b9f368d744b6dadce3bbf2364b12d9f150d990d2abdc488ca47952/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 07:14:15 UTC; 22min ago
      Until: Sat 2025-12-13 07:14:15 UTC; 22min ago
      Where: /var/lib/containers/storage/overlay/49e51303f4b9f368d744b6dadce3bbf2364b12d9f150d990d2abdc488ca47952/merged
       What: overlay

● var-lib-containers-storage-overlay-6b99b624df972157fdfc5e1964b2f648ce2236d9f885434f04fdbb4225095713-merged.mount - /var/lib/containers/storage/overlay/6b99b624df972157fdfc5e1964b2f648ce2236d9f885434f04fdbb4225095713/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 07:29:17 UTC; 7min ago
      Until: Sat 2025-12-13 07:29:17 UTC; 7min ago
      Where: /var/lib/containers/storage/overlay/6b99b624df972157fdfc5e1964b2f648ce2236d9f885434f04fdbb4225095713/merged
       What: overlay

● var-lib-containers-storage-overlay-7c900ea433a65f9109f55cb6fe57286edb5b6f4dee643c19d1929e16cfeec254-merged.mount - /var/lib/containers/storage/overlay/7c900ea433a65f9109f55cb6fe57286edb5b6f4dee643c19d1929e16cfeec254/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 07:28:04 UTC; 8min ago
      Until: Sat 2025-12-13 07:28:04 UTC; 8min ago
      Where: /var/lib/containers/storage/overlay/7c900ea433a65f9109f55cb6fe57286edb5b6f4dee643c19d1929e16cfeec254/merged
       What: overlay

● var-lib-containers-storage-overlay-97e60b40e88c9d4273841e05a580f706923cb5a4635c1fb0bec6354585657969-merged.mount - /var/lib/containers/storage/overlay/97e60b40e88c9d4273841e05a580f706923cb5a4635c1fb0bec6354585657969/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 07:22:49 UTC; 14min ago
      Until: Sat 2025-12-13 07:22:49 UTC; 14min ago
      Where: /var/lib/containers/storage/overlay/97e60b40e88c9d4273841e05a580f706923cb5a4635c1fb0bec6354585657969/merged
       What: overlay

● var-lib-containers-storage-overlay-a89559cb37910374ddd1f527bb0a82bdc91c2d7a0c74c265319fb98ee5101af6-merged.mount - /var/lib/containers/storage/overlay/a89559cb37910374ddd1f527bb0a82bdc91c2d7a0c74c265319fb98ee5101af6/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 07:14:12 UTC; 22min ago
      Until: Sat 2025-12-13 07:14:12 UTC; 22min ago
      Where: /var/lib/containers/storage/overlay/a89559cb37910374ddd1f527bb0a82bdc91c2d7a0c74c265319fb98ee5101af6/merged
       What: overlay

● var-lib-containers-storage-overlay-c5ac2e3cc0f49fbd08a64ac89f3699fdf738171896df38043320a4a42d495566-merged.mount - /var/lib/containers/storage/overlay/c5ac2e3cc0f49fbd08a64ac89f3699fdf738171896df38043320a4a42d495566/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 07:23:39 UTC; 13min ago
      Until: Sat 2025-12-13 07:23:39 UTC; 13min ago
      Where: /var/lib/containers/storage/overlay/c5ac2e3cc0f49fbd08a64ac89f3699fdf738171896df38043320a4a42d495566/merged
       What: overlay

● var-lib-containers-storage-overlay-cf02c05f4f9aa98d6346eecda97a1eaa8bcc35c64dca7a62c6f94dc4c5161700-merged.mount - /var/lib/containers/storage/overlay/cf02c05f4f9aa98d6346eecda97a1eaa8bcc35c64dca7a62c6f94dc4c5161700/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 07:14:58 UTC; 21min ago
      Until: Sat 2025-12-13 07:14:58 UTC; 21min ago
      Where: /var/lib/containers/storage/overlay/cf02c05f4f9aa98d6346eecda97a1eaa8bcc35c64dca7a62c6f94dc4c5161700/merged
       What: overlay

● var-lib-containers-storage-overlay-e333fa43a805bfdea08e5fddc8346c8cf7d0194486896d26b03368e3d204987f-merged.mount - /var/lib/containers/storage/overlay/e333fa43a805bfdea08e5fddc8346c8cf7d0194486896d26b03368e3d204987f/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 07:14:56 UTC; 21min ago
      Until: Sat 2025-12-13 07:14:56 UTC; 21min ago
      Where: /var/lib/containers/storage/overlay/e333fa43a805bfdea08e5fddc8346c8cf7d0194486896d26b03368e3d204987f/merged
       What: overlay

● var-lib-containers-storage-overlay-f187ab4b9e239a28c546dd35fd1006ef1c99f0252f30548ee49ab2fe96259030-merged.mount - /var/lib/containers/storage/overlay/f187ab4b9e239a28c546dd35fd1006ef1c99f0252f30548ee49ab2fe96259030/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 07:14:18 UTC; 22min ago
      Until: Sat 2025-12-13 07:14:18 UTC; 22min ago
      Where: /var/lib/containers/storage/overlay/f187ab4b9e239a28c546dd35fd1006ef1c99f0252f30548ee49ab2fe96259030/merged
       What: overlay

● var-lib-containers-storage-overlay.mount - /var/lib/containers/storage/overlay
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 07:13:21 UTC; 23min ago
      Until: Sat 2025-12-13 07:13:21 UTC; 23min ago
      Where: /var/lib/containers/storage/overlay
       What: /dev/vda1

● var-lib-containers-storage-overlay\x2dcontainers-1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 07:23:39 UTC; 13min ago
      Until: Sat 2025-12-13 07:23:39 UTC; 13min ago
      Where: /var/lib/containers/storage/overlay-containers/1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 07:29:17 UTC; 7min ago
      Until: Sat 2025-12-13 07:29:17 UTC; 7min ago
      Where: /var/lib/containers/storage/overlay-containers/85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 07:22:49 UTC; 14min ago
      Until: Sat 2025-12-13 07:22:49 UTC; 14min ago
      Where: /var/lib/containers/storage/overlay-containers/d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-12-13 07:28:04 UTC; 8min ago
      Until: Sat 2025-12-13 07:28:04 UTC; 8min ago
      Where: /var/lib/containers/storage/overlay-containers/f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6/userdata/shm
       What: shm

○ var-lib-machines.mount - Virtual Machine and Container Storage (Compatibility)
     Loaded: loaded (/usr/lib/systemd/system/var-lib-machines.mount; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-12-13 07:26:42 UTC; 10min ago
      Where: /var/lib/machines
       What: /var/lib/machines.raw

Dec 13 07:26:42 compute-0 systemd[1]: Virtual Machine and Container Storage (Compatibility) was skipped because of an unmet condition check (ConditionPathExists=/var/lib/machines.raw).

● var-lib-nfs-rpc_pipefs.mount - RPC Pipe File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
      Where: /var/lib/nfs/rpc_pipefs
       What: rpc_pipefs

● systemd-ask-password-console.path - Dispatch Password Requests to Console Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.path; static)
     Active: active (waiting) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
   Triggers: ● systemd-ask-password-console.service
       Docs: man:systemd-ask-password-console.path(8)

● systemd-ask-password-wall.path - Forward Password Requests to Wall Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.path; static)
     Active: active (waiting) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
   Triggers: ● systemd-ask-password-wall.service
       Docs: man:systemd-ask-password-wall.path(8)

● init.scope - System and Service Manager
     Loaded: loaded
  Transient: yes
     Active: active (running) since Sat 2025-12-13 06:42:07 UTC; 54min ago
       Docs: man:systemd(1)
         IO: 2.9M read, 0B written
      Tasks: 1 (limit: 48568)
     Memory: 50.5M (peak: 67.8M)
        CPU: 44.461s
     CGroup: /init.scope
             └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31

Dec 13 07:36:36 compute-0 systemd[1]: Started libpod-conmon-7ef045a8093e37ddfe81fb914959b324beccf6e9409d5e86e4bc2d506d3e7a83.scope.
Dec 13 07:36:36 compute-0 systemd[1]: Started libcrun container.
Dec 13 07:36:36 compute-0 systemd[1]: libpod-7ef045a8093e37ddfe81fb914959b324beccf6e9409d5e86e4bc2d506d3e7a83.scope: Deactivated successfully.
Dec 13 07:36:36 compute-0 systemd[1]: var-lib-containers-storage-overlay-bc7b8449204da5322c2256cbdb7e34c3cd08a36bac73ce34ddd7064160553db4-merged.mount: Deactivated successfully.
Dec 13 07:36:36 compute-0 systemd[1]: libpod-conmon-7ef045a8093e37ddfe81fb914959b324beccf6e9409d5e86e4bc2d506d3e7a83.scope: Deactivated successfully.
Dec 13 07:36:37 compute-0 systemd[1]: Started libpod-conmon-404541e0807a459ae6ec7f22f774933b836db8dd393ed1f5af5f1eac4d23db94.scope.
Dec 13 07:36:37 compute-0 systemd[1]: Started libcrun container.
Dec 13 07:36:37 compute-0 systemd[1]: libpod-404541e0807a459ae6ec7f22f774933b836db8dd393ed1f5af5f1eac4d23db94.scope: Deactivated successfully.
Dec 13 07:36:37 compute-0 systemd[1]: var-lib-containers-storage-overlay-3bf735cd2afa77a110740d8bea3c1686099be5dc68bd2760ad55048f93416a7e-merged.mount: Deactivated successfully.
Dec 13 07:36:37 compute-0 systemd[1]: libpod-conmon-404541e0807a459ae6ec7f22f774933b836db8dd393ed1f5af5f1eac4d23db94.scope: Deactivated successfully.

● libpod-1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07.scope.d
             └─dep.conf
     Active: active (running) since Sat 2025-12-13 07:23:39 UTC; 13min ago
         IO: 5.1M read, 220.0K written
      Tasks: 5 (limit: 4096)
     Memory: 226.8M (peak: 228.8M)
        CPU: 3.591s
     CGroup: /machine.slice/libpod-1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07.scope
             └─container
               ├─154118 dumb-init --single-child -- kolla_start
               ├─154121 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─154224 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               └─154229 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpj55srbhp/privsep.sock

Dec 13 07:23:39 compute-0 systemd[1]: Started libcrun container.

● libpod-85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-12-13 07:29:17 UTC; 7min ago
         IO: 0B read, 36.0K written
      Tasks: 23 (limit: 4096)
     Memory: 134.5M (peak: 165.1M)
        CPU: 10.403s
     CGroup: /machine.slice/libpod-85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306.scope
             └─container
               ├─241224 dumb-init --single-child -- kolla_start
               └─241226 /usr/bin/python3 /usr/bin/nova-compute

Dec 13 07:29:17 compute-0 systemd[1]: Started libcrun container.

● libpod-d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed.scope.d
             └─dep.conf
     Active: active (running) since Sat 2025-12-13 07:22:49 UTC; 14min ago
         IO: 4.6M read, 4.0K written
      Tasks: 6 (limit: 4096)
     Memory: 11.7M (peak: 14.6M)
        CPU: 827ms
     CGroup: /machine.slice/libpod-d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed.scope
             └─container
               ├─144649 dumb-init --single-child -- kolla_start
               └─144652 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt

Dec 13 07:22:49 compute-0 systemd[1]: Started libcrun container.

● libpod-f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-12-13 07:28:04 UTC; 8min ago
         IO: 0B read, 4.0K written
      Tasks: 8 (limit: 4096)
     Memory: 19.0M (peak: 21.1M)
        CPU: 284ms
     CGroup: /machine.slice/libpod-f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6.scope
             └─container
               ├─223788 dumb-init --single-child -- kolla_start
               └─223792 /usr/sbin/multipathd -d

Dec 13 07:28:04 compute-0 systemd[1]: Started libcrun container.
Dec 13 07:28:04 compute-0 sudo[223793]:     root : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_set_configs
Dec 13 07:28:04 compute-0 sudo[223793]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Dec 13 07:28:04 compute-0 sudo[223793]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=0)
Dec 13 07:28:04 compute-0 sudo[223793]: pam_unix(sudo:session): session closed for user root
Dec 13 07:28:04 compute-0 sudo[223818]:     root : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_copy_cacerts
Dec 13 07:28:04 compute-0 sudo[223818]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Dec 13 07:28:04 compute-0 sudo[223818]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=0)
Dec 13 07:28:04 compute-0 sudo[223818]: pam_unix(sudo:session): session closed for user root

● session-1.scope - Session 1 of User zuul
     Loaded: loaded (/run/systemd/transient/session-1.scope; transient)
  Transient: yes
     Active: active (abandoned) since Sat 2025-12-13 06:42:43 UTC; 54min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 17.1M (peak: 38.4M)
        CPU: 1min 8.370s
     CGroup: /user.slice/user-1000.slice/session-1.scope
             └─4579 /usr/bin/python3

Dec 13 06:44:54 np0005558317 sudo[7408]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Dec 13 06:44:54 np0005558317 python3[7410]: ansible-ansible.legacy.stat Invoked with path=/etc/ci/env/networking-info.yml follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Dec 13 06:44:54 np0005558317 sudo[7408]: pam_unix(sudo:session): session closed for user root
Dec 13 06:44:54 np0005558317 sudo[7481]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-iabpmjemgedlddopmoanszlarqqzdbtf ; OS_CLOUD=ibm-bm3-nodepool /usr/bin/python3'
Dec 13 06:44:54 np0005558317 sudo[7481]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Dec 13 06:44:54 np0005558317 python3[7483]: ansible-ansible.legacy.copy Invoked with dest=/etc/ci/env/networking-info.yml owner=root group=root mode=0644 src=/home/zuul/.ansible/tmp/ansible-tmp-1765608294.0741224-273-27766293396627/source _original_basename=tmpb2ss1gkr follow=False checksum=480db894146ef2cc1376d935191c022003cc0988 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Dec 13 06:44:54 np0005558317 sudo[7481]: pam_unix(sudo:session): session closed for user root
Dec 13 06:45:54 np0005558317 sshd-session[4382]: Received disconnect from 192.168.25.12 port 58500:11: disconnected by user
Dec 13 06:45:54 np0005558317 sshd-session[4382]: Disconnected from user zuul 192.168.25.12 port 58500
Dec 13 06:45:54 np0005558317 sshd-session[4369]: pam_unix(sshd:session): session closed for user zuul

● session-20.scope - Session 20 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-20.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-12-13 07:13:43 UTC; 23min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.0M (peak: 1.3M)
        CPU: 24ms
     CGroup: /user.slice/user-42477.slice/session-20.scope
             ├─76206 "sshd-session: ceph-admin [priv]"
             └─76226 "sshd-session: ceph-admin"

Dec 13 07:13:43 compute-0 systemd[1]: Started Session 20 of User ceph-admin.

● session-22.scope - Session 22 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-22.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-12-13 07:13:43 UTC; 23min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 3.9M)
        CPU: 73ms
     CGroup: /user.slice/user-42477.slice/session-22.scope
             ├─76223 "sshd-session: ceph-admin [priv]"
             └─76228 "sshd-session: ceph-admin@notty"

Dec 13 07:13:43 compute-0 systemd[1]: Started Session 22 of User ceph-admin.
Dec 13 07:13:43 compute-0 sudo[76229]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Dec 13 07:13:43 compute-0 sudo[76229]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 13 07:13:43 compute-0 sudo[76229]: pam_unix(sudo:session): session closed for user root

● session-23.scope - Session 23 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-23.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-12-13 07:13:43 UTC; 23min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 5.8M)
        CPU: 72ms
     CGroup: /user.slice/user-42477.slice/session-23.scope
             ├─76254 "sshd-session: ceph-admin [priv]"
             └─76257 "sshd-session: ceph-admin@notty"

Dec 13 07:13:43 compute-0 systemd[1]: Started Session 23 of User ceph-admin.
Dec 13 07:13:43 compute-0 sudo[76258]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/00fdae1b-7fad-5f1b-8734-ba4d9298a6de/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b --timeout 895 check-host --expect-hostname compute-0
Dec 13 07:13:43 compute-0 sudo[76258]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 13 07:13:43 compute-0 sudo[76258]: pam_unix(sudo:session): session closed for user root

● session-24.scope - Session 24 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-24.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-12-13 07:13:43 UTC; 23min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.5M)
        CPU: 64ms
     CGroup: /user.slice/user-42477.slice/session-24.scope
             ├─76283 "sshd-session: ceph-admin [priv]"
             └─76286 "sshd-session: ceph-admin@notty"

Dec 13 07:13:43 compute-0 systemd[1]: Started Session 24 of User ceph-admin.
Dec 13 07:13:43 compute-0 sudo[76287]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /var/lib/ceph/00fdae1b-7fad-5f1b-8734-ba4d9298a6de/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b
Dec 13 07:13:43 compute-0 sudo[76287]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 13 07:13:43 compute-0 sudo[76287]: pam_unix(sudo:session): session closed for user root

● session-25.scope - Session 25 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-25.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-12-13 07:13:44 UTC; 23min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.1M)
        CPU: 63ms
     CGroup: /user.slice/user-42477.slice/session-25.scope
             ├─76312 "sshd-session: ceph-admin [priv]"
             └─76315 "sshd-session: ceph-admin@notty"

Dec 13 07:13:44 compute-0 systemd[1]: Started Session 25 of User ceph-admin.
Dec 13 07:13:44 compute-0 sudo[76316]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /var/lib/ceph/00fdae1b-7fad-5f1b-8734-ba4d9298a6de
Dec 13 07:13:44 compute-0 sudo[76316]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 13 07:13:44 compute-0 sudo[76316]: pam_unix(sudo:session): session closed for user root

● session-26.scope - Session 26 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-26.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-12-13 07:13:44 UTC; 23min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.2M)
        CPU: 72ms
     CGroup: /user.slice/user-42477.slice/session-26.scope
             ├─76341 "sshd-session: ceph-admin [priv]"
             └─76344 "sshd-session: ceph-admin@notty"

Dec 13 07:13:44 compute-0 systemd[1]: Started Session 26 of User ceph-admin.
Dec 13 07:13:44 compute-0 sudo[76345]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /tmp/cephadm-00fdae1b-7fad-5f1b-8734-ba4d9298a6de/var/lib/ceph/00fdae1b-7fad-5f1b-8734-ba4d9298a6de
Dec 13 07:13:44 compute-0 sudo[76345]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 13 07:13:44 compute-0 sudo[76345]: pam_unix(sudo:session): session closed for user root

● session-27.scope - Session 27 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-27.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-12-13 07:13:44 UTC; 23min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.2M)
        CPU: 63ms
     CGroup: /user.slice/user-42477.slice/session-27.scope
             ├─76370 "sshd-session: ceph-admin [priv]"
             └─76373 "sshd-session: ceph-admin@notty"

Dec 13 07:13:44 compute-0 systemd[1]: Started Session 27 of User ceph-admin.
Dec 13 07:13:44 compute-0 sudo[76374]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/touch /tmp/cephadm-00fdae1b-7fad-5f1b-8734-ba4d9298a6de/var/lib/ceph/00fdae1b-7fad-5f1b-8734-ba4d9298a6de/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b.new
Dec 13 07:13:44 compute-0 sudo[76374]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 13 07:13:44 compute-0 sudo[76374]: pam_unix(sudo:session): session closed for user root

● session-28.scope - Session 28 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-28.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-12-13 07:13:44 UTC; 23min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.0M)
        CPU: 62ms
     CGroup: /user.slice/user-42477.slice/session-28.scope
             ├─76399 "sshd-session: ceph-admin [priv]"
             └─76402 "sshd-session: ceph-admin@notty"

Dec 13 07:13:44 compute-0 systemd[1]: Started Session 28 of User ceph-admin.
Dec 13 07:13:44 compute-0 sudo[76403]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chown -R ceph-admin /tmp/cephadm-00fdae1b-7fad-5f1b-8734-ba4d9298a6de
Dec 13 07:13:44 compute-0 sudo[76403]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 13 07:13:44 compute-0 sudo[76403]: pam_unix(sudo:session): session closed for user root

● session-29.scope - Session 29 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-29.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-12-13 07:13:45 UTC; 23min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.1M)
        CPU: 63ms
     CGroup: /user.slice/user-42477.slice/session-29.scope
             ├─76428 "sshd-session: ceph-admin [priv]"
             └─76431 "sshd-session: ceph-admin@notty"

Dec 13 07:13:45 compute-0 systemd[1]: Started Session 29 of User ceph-admin.
Dec 13 07:13:45 compute-0 sudo[76432]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chmod 644 /tmp/cephadm-00fdae1b-7fad-5f1b-8734-ba4d9298a6de/var/lib/ceph/00fdae1b-7fad-5f1b-8734-ba4d9298a6de/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b.new
Dec 13 07:13:45 compute-0 sudo[76432]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 13 07:13:45 compute-0 sudo[76432]: pam_unix(sudo:session): session closed for user root

● session-30.scope - Session 30 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-30.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-12-13 07:13:45 UTC; 23min ago
         IO: 0B read, 1016.0K written
      Tasks: 2
     Memory: 2.2M (peak: 4.0M)
        CPU: 73ms
     CGroup: /user.slice/user-42477.slice/session-30.scope
             ├─76457 "sshd-session: ceph-admin [priv]"
             └─76460 "sshd-session: ceph-admin@notty"

Dec 13 07:13:45 compute-0 systemd[1]: Started Session 30 of User ceph-admin.

● session-31.scope - Session 31 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-31.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-12-13 07:13:46 UTC; 23min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 6.0M)
        CPU: 74ms
     CGroup: /user.slice/user-42477.slice/session-31.scope
             ├─76484 "sshd-session: ceph-admin [priv]"
             └─76487 "sshd-session: ceph-admin@notty"

Dec 13 07:13:46 compute-0 systemd[1]: Started Session 31 of User ceph-admin.
Dec 13 07:13:46 compute-0 sudo[76488]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mv -Z /tmp/cephadm-00fdae1b-7fad-5f1b-8734-ba4d9298a6de/var/lib/ceph/00fdae1b-7fad-5f1b-8734-ba4d9298a6de/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b.new /var/lib/ceph/00fdae1b-7fad-5f1b-8734-ba4d9298a6de/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b
Dec 13 07:13:46 compute-0 sudo[76488]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 13 07:13:46 compute-0 sudo[76488]: pam_unix(sudo:session): session closed for user root

● session-32.scope - Session 32 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-32.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-12-13 07:13:46 UTC; 23min ago
         IO: 2.7M read, 93.4M written
      Tasks: 2
     Memory: 7.2M (peak: 59.4M)
        CPU: 1min 17.659s
     CGroup: /user.slice/user-42477.slice/session-32.scope
             ├─76513 "sshd-session: ceph-admin [priv]"
             └─76516 "sshd-session: ceph-admin@notty"

Dec 13 07:36:37 compute-0 podman[249934]: 2025-12-13 07:36:37.1878562 +0000 UTC m=+0.112081716 container init 404541e0807a459ae6ec7f22f774933b836db8dd393ed1f5af5f1eac4d23db94 (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=blissful_aryabhata, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=tentacle, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20251030, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489)
Dec 13 07:36:37 compute-0 podman[249934]: 2025-12-13 07:36:37.09598201 +0000 UTC m=+0.020207506 image pull 524f3da276461682bec27427fb8a63b5139c40ad4185939aede197474a6817b3 quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86
Dec 13 07:36:37 compute-0 podman[249934]: 2025-12-13 07:36:37.200129848 +0000 UTC m=+0.124355344 container start 404541e0807a459ae6ec7f22f774933b836db8dd393ed1f5af5f1eac4d23db94 (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=blissful_aryabhata, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, CEPH_REF=tentacle, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.build-date=20251030, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Dec 13 07:36:37 compute-0 podman[249934]: 2025-12-13 07:36:37.201229926 +0000 UTC m=+0.125455422 container attach 404541e0807a459ae6ec7f22f774933b836db8dd393ed1f5af5f1eac4d23db94 (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=blissful_aryabhata, ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.build-date=20251030, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=tentacle)
Dec 13 07:36:37 compute-0 podman[249934]: 2025-12-13 07:36:37.806201108 +0000 UTC m=+0.730426605 container died 404541e0807a459ae6ec7f22f774933b836db8dd393ed1f5af5f1eac4d23db94 (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=blissful_aryabhata, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251030, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=tentacle, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Dec 13 07:36:37 compute-0 podman[249934]: 2025-12-13 07:36:37.857963317 +0000 UTC m=+0.782188813 container remove 404541e0807a459ae6ec7f22f774933b836db8dd393ed1f5af5f1eac4d23db94 (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=blissful_aryabhata, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.license=GPLv2, CEPH_REF=tentacle, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, org.label-schema.build-date=20251030)
Dec 13 07:36:37 compute-0 sudo[249775]: pam_unix(sudo:session): session closed for user root
Dec 13 07:36:38 compute-0 sudo[250109]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Dec 13 07:36:38 compute-0 sudo[250109]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 13 07:36:38 compute-0 sudo[250109]: pam_unix(sudo:session): session closed for user root

● session-54.scope - Session 54 of User zuul
     Loaded: loaded (/run/systemd/transient/session-54.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-12-13 07:36:09 UTC; 41s ago
         IO: 305.4M read, 221.0M written
      Tasks: 31
     Memory: 706.3M (peak: 763.9M)
        CPU: 1min 39.449s
     CGroup: /user.slice/user-1000.slice/session-54.scope
             ├─246077 "sshd-session: zuul [priv]"
             ├─246080 "sshd-session: zuul@notty"
             ├─246081 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             ├─246105 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             ├─252668 timeout 15s turbostat --debug sleep 10
             ├─253143 timeout 300s systemctl status --all
             ├─253144 systemctl status --all
             ├─253147 timeout 300s ceph osd perf --format json-pretty
             ├─253148 /usr/bin/python3 -s /usr/bin/ceph osd perf --format json-pretty
             ├─253180 timeout 300s tpm2_readclock
             └─253181 tpm2_readclock

Dec 13 07:36:09 compute-0 systemd[1]: Started Session 54 of User zuul.
Dec 13 07:36:09 compute-0 sudo[246081]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Dec 13 07:36:09 compute-0 sudo[246081]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Dec 13 07:36:18 compute-0 ovs-vsctl[246408]: ovs|00001|db_ctl_base|ERR|no key "dpdk-init" in Open_vSwitch record "." column other_config
Dec 13 07:36:43 compute-0 ovs-appctl[251847]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Dec 13 07:36:43 compute-0 ovs-appctl[251853]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Unit apparmor.service could not be found.
Unit apt-daily.service could not be found.
Dec 13 07:36:43 compute-0 ovs-appctl[251862]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

○ 1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07-35b4a96ef7c0aecc.service - /usr/bin/podman healthcheck run 1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07
     Loaded: loaded (/run/systemd/transient/1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07-35b4a96ef7c0aecc.service; transient)
  Transient: yes
     Active: inactive (dead) since Sat 2025-12-13 07:36:26 UTC; 24s ago
   Duration: 118ms
TriggeredBy: ● 1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07-35b4a96ef7c0aecc.timer
    Process: 247867 ExecStart=/usr/bin/podman healthcheck run 1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07 (code=exited, status=0/SUCCESS)
   Main PID: 247867 (code=exited, status=0/SUCCESS)
        CPU: 59ms

Dec 13 07:36:26 compute-0 podman[247867]: 2025-12-13 07:36:26.760459982 +0000 UTC m=+0.100722168 container health_status 1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:85729a662800e6b42ceb088545fed39a2ac58704b4a37fd540cdef3ebf9e59a2, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:85729a662800e6b42ceb088545fed39a2ac58704b4a37fd540cdef3ebf9e59a2', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, 
tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, tcib_managed=true, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20251202, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.license=GPLv2)

● auditd.service - Security Auditing Service
     Loaded: loaded (/usr/lib/systemd/system/auditd.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:auditd(8)
             https://github.com/linux-audit/audit-documentation
   Main PID: 673 (auditd)
         IO: 0B read, 21.4M written
      Tasks: 4 (limit: 48568)
     Memory: 15.0M (peak: 15.4M)
        CPU: 2.861s
     CGroup: /system.slice/auditd.service
             ├─673 /sbin/auditd
             └─675 /usr/sbin/sedispatch

Dec 13 06:42:10 localhost augenrules[693]: failure 1
Dec 13 06:42:10 localhost augenrules[693]: pid 673
Dec 13 06:42:10 localhost augenrules[693]: rate_limit 0
Dec 13 06:42:10 localhost augenrules[693]: backlog_limit 8192
Unit auto-cpufreq.service could not be found.
Unit autofs.service could not be found.
Dec 13 06:42:10 localhost augenrules[693]: lost 0
Dec 13 06:42:10 localhost augenrules[693]: backlog 0
Dec 13 06:42:10 localhost augenrules[693]: backlog_wait_time 60000
Dec 13 06:42:10 localhost augenrules[693]: backlog_wait_time_actual 0
Dec 13 06:42:10 localhost systemd[1]: Started Security Auditing Service.
Dec 13 07:26:35 compute-0 auditd[673]: Audit daemon rotating log files

○ auth-rpcgss-module.service - Kernel Module supporting RPCSEC_GSS
     Loaded: loaded (/usr/lib/systemd/system/auth-rpcgss-module.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-12-13 06:42:10 UTC; 54min ago

● ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@crash.compute-0.service - Ceph crash.compute-0 for 00fdae1b-7fad-5f1b-8734-ba4d9298a6de
     Loaded: loaded (/etc/systemd/system/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-12-13 07:13:55 UTC; 22min ago
   Main PID: 79701 (conmon)
         IO: 0B read, 170.5K written
      Tasks: 3 (limit: 48568)
     Memory: 7.7M (peak: 23.6M)
        CPU: 349ms
     CGroup: /system.slice/system-ceph\x2d00fdae1b\x2d7fad\x2d5f1b\x2d8734\x2dba4d9298a6de.slice/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@crash.compute-0.service
             ├─libpod-payload-8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc
             │ ├─79703 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ └─79705 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             └─runtime
               └─79701 /usr/bin/conmon --api-version 1 -c 8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc -u 8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc/userdata -p /run/containers/storage/overlay-containers/8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc

Dec 13 07:13:55 compute-0 systemd[1]: Started Ceph crash.compute-0 for 00fdae1b-7fad-5f1b-8734-ba4d9298a6de.
Dec 13 07:13:55 compute-0 ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-crash-compute-0[79701]: INFO:ceph-crash:pinging cluster to exercise our key
Dec 13 07:13:55 compute-0 ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-crash-compute-0[79701]: 2025-12-13T07:13:55.581+0000 7f1275645640 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory
Dec 13 07:13:55 compute-0 ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-crash-compute-0[79701]: 2025-12-13T07:13:55.581+0000 7f1275645640 -1 AuthRegistry(0x7f1270052d90) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx
Dec 13 07:13:55 compute-0 ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-crash-compute-0[79701]: 2025-12-13T07:13:55.585+0000 7f1275645640 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory
Dec 13 07:13:55 compute-0 ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-crash-compute-0[79701]: 2025-12-13T07:13:55.585+0000 7f1275645640 -1 AuthRegistry(0x7f1275643fe0) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx
Dec 13 07:13:55 compute-0 ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-crash-compute-0[79701]: 2025-12-13T07:13:55.586+0000 7f126effd640 -1 monclient(hunting): handle_auth_bad_method server allowed_methods [2] but i only support [1]
Dec 13 07:13:55 compute-0 ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-crash-compute-0[79701]: 2025-12-13T07:13:55.587+0000 7f1275645640 -1 monclient: authenticate NOTE: no keyring found; disabled cephx authentication
Dec 13 07:13:55 compute-0 ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-crash-compute-0[79701]: [errno 13] RADOS permission denied (error connecting to the cluster)
Dec 13 07:13:55 compute-0 ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-crash-compute-0[79701]: INFO:ceph-crash:monitoring path /var/lib/ceph/crash, delay 600s

● ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mds.cephfs.compute-0.zwnyoz.service - Ceph mds.cephfs.compute-0.zwnyoz for 00fdae1b-7fad-5f1b-8734-ba4d9298a6de
     Loaded: loaded (/etc/systemd/system/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-12-13 07:14:58 UTC; 21min ago
   Main PID: 93860 (conmon)
         IO: 0B read, 187.5K written
      Tasks: 31 (limit: 48568)
     Memory: 23.8M (peak: 24.5M)
        CPU: 1.568s
     CGroup: /system.slice/system-ceph\x2d00fdae1b\x2d7fad\x2d5f1b\x2d8734\x2dba4d9298a6de.slice/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mds.cephfs.compute-0.zwnyoz.service
             ├─libpod-payload-c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50
             │ ├─93862 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.zwnyoz -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─93864 /usr/bin/ceph-mds -n mds.cephfs.compute-0.zwnyoz -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─93860 /usr/bin/conmon --api-version 1 -c c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50 -u c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50/userdata -p /run/containers/storage/overlay-containers/c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-mds-cephfs-compute-0-zwnyoz --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mds.cephfs.compute-0.zwnyoz.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50

Dec 13 07:36:20 compute-0 ceph-mds[93864]: mds.cephfs.compute-0.zwnyoz asok_command: dump loads {prefix=dump loads} (starting...)
Dec 13 07:36:20 compute-0 ceph-mds[93864]: mds.cephfs.compute-0.zwnyoz asok_command: dump tree {prefix=dump tree,root=/} (starting...)
Dec 13 07:36:20 compute-0 ceph-mds[93864]: mds.cephfs.compute-0.zwnyoz asok_command: dump_blocked_ops {prefix=dump_blocked_ops} (starting...)
Dec 13 07:36:20 compute-0 ceph-mds[93864]: mds.cephfs.compute-0.zwnyoz asok_command: dump_historic_ops {prefix=dump_historic_ops} (starting...)
Dec 13 07:36:21 compute-0 ceph-mds[93864]: mds.cephfs.compute-0.zwnyoz asok_command: dump_historic_ops_by_duration {prefix=dump_historic_ops_by_duration} (starting...)
Dec 13 07:36:21 compute-0 ceph-mds[93864]: mds.cephfs.compute-0.zwnyoz asok_command: dump_ops_in_flight {prefix=dump_ops_in_flight} (starting...)
Dec 13 07:36:21 compute-0 ceph-mds[93864]: mds.cephfs.compute-0.zwnyoz asok_command: get subtrees {prefix=get subtrees} (starting...)
Dec 13 07:36:21 compute-0 ceph-mds[93864]: mds.cephfs.compute-0.zwnyoz asok_command: ops {prefix=ops} (starting...)
Dec 13 07:36:22 compute-0 ceph-mds[93864]: mds.cephfs.compute-0.zwnyoz asok_command: session ls {prefix=session ls} (starting...)
Dec 13 07:36:22 compute-0 ceph-mds[93864]: mds.cephfs.compute-0.zwnyoz asok_command: status {prefix=status} (starting...)

● ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mgr.compute-0.qsherl.service - Ceph mgr.compute-0.qsherl for 00fdae1b-7fad-5f1b-8734-ba4d9298a6de
     Loaded: loaded (/etc/systemd/system/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-12-13 07:13:23 UTC; 23min ago
   Main PID: 75196 (conmon)
         IO: 0B read, 2.3M written
      Tasks: 144 (limit: 48568)
     Memory: 520.3M (peak: 521.2M)
        CPU: 26.608s
     CGroup: /system.slice/system-ceph\x2d00fdae1b\x2d7fad\x2d5f1b\x2d8734\x2dba4d9298a6de.slice/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mgr.compute-0.qsherl.service
             ├─libpod-payload-4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283
             │ ├─75198 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.qsherl -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─75200 /usr/bin/ceph-mgr -n mgr.compute-0.qsherl -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─75196 /usr/bin/conmon --api-version 1 -c 4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283 -u 4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283/userdata -p /run/containers/storage/overlay-containers/4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-mgr-compute-0-qsherl --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mgr.compute-0.qsherl.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283

Dec 13 07:36:48 compute-0 ceph-mgr[75200]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 4.1969867161554995e-06 of space, bias 1.0, pg target 0.0012590960148466499 quantized to 32 (current 32)
Dec 13 07:36:48 compute-0 ceph-mgr[75200]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Dec 13 07:36:48 compute-0 ceph-mgr[75200]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Dec 13 07:36:48 compute-0 ceph-mgr[75200]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Dec 13 07:36:48 compute-0 ceph-mgr[75200]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Dec 13 07:36:48 compute-0 ceph-mgr[75200]: log_channel(cluster) log [DBG] : pgmap v801: 321 pgs: 321 active+clean; 461 KiB data, 136 MiB used, 60 GiB / 60 GiB avail
Dec 13 07:36:49 compute-0 ceph-mgr[75200]: log_channel(audit) log [DBG] : from='client.14610 -' entity='client.admin' cmd=[{"prefix": "osd blocked-by", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Dec 13 07:36:49 compute-0 ceph-mgr[75200]: log_channel(audit) log [DBG] : from='client.14614 -' entity='client.admin' cmd=[{"prefix": "osd df", "output_method": "tree", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Dec 13 07:36:50 compute-0 ceph-mgr[75200]: log_channel(audit) log [DBG] : from='client.14616 -' entity='client.admin' cmd=[{"prefix": "osd df", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Dec 13 07:36:50 compute-0 ceph-mgr[75200]: log_channel(cluster) log [DBG] : pgmap v802: 321 pgs: 321 active+clean; 461 KiB data, 136 MiB used, 60 GiB / 60 GiB avail

● ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mon.compute-0.service - Ceph mon.compute-0 for 00fdae1b-7fad-5f1b-8734-ba4d9298a6de
     Loaded: loaded (/etc/systemd/system/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-12-13 07:13:22 UTC; 23min ago
   Main PID: 74924 (conmon)
         IO: 2.3M read, 133.9M written
      Tasks: 27 (limit: 48568)
     Memory: 60.4M (peak: 73.6M)
        CPU: 9.536s
     CGroup: /system.slice/system-ceph\x2d00fdae1b\x2d7fad\x2d5f1b\x2d8734\x2dba4d9298a6de.slice/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mon.compute-0.service
             ├─libpod-payload-4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a
             │ ├─74926 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─74928 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             └─runtime
               └─74924 /usr/bin/conmon --api-version 1 -c 4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a -u 4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a/userdata -p /run/containers/storage/overlay-containers/4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a

Dec 13 07:36:49 compute-0 ceph-mon[74928]: from='client.? 192.168.122.100:0/1785970505' entity='client.admin' cmd={"prefix": "osd blocklist ls", "format": "json-pretty"} : dispatch
Dec 13 07:36:49 compute-0 ceph-mon[74928]: from='client.14614 -' entity='client.admin' cmd=[{"prefix": "osd df", "output_method": "tree", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Dec 13 07:36:50 compute-0 ceph-mon[74928]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd dump", "format": "json-pretty"} v 0)
Dec 13 07:36:50 compute-0 ceph-mon[74928]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2657197604' entity='client.admin' cmd={"prefix": "osd dump", "format": "json-pretty"} : dispatch
Dec 13 07:36:50 compute-0 ceph-mon[74928]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd numa-status", "format": "json-pretty"} v 0)
Dec 13 07:36:50 compute-0 ceph-mon[74928]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1044118940' entity='client.admin' cmd={"prefix": "osd numa-status", "format": "json-pretty"} : dispatch
Dec 13 07:36:50 compute-0 ceph-mon[74928]: from='client.14616 -' entity='client.admin' cmd=[{"prefix": "osd df", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Dec 13 07:36:50 compute-0 ceph-mon[74928]: from='client.? 192.168.122.100:0/2657197604' entity='client.admin' cmd={"prefix": "osd dump", "format": "json-pretty"} : dispatch
Dec 13 07:36:50 compute-0 ceph-mon[74928]: pgmap v802: 321 pgs: 321 active+clean; 461 KiB data, 136 MiB used, 60 GiB / 60 GiB avail
Dec 13 07:36:50 compute-0 ceph-mon[74928]: from='client.? 192.168.122.100:0/1044118940' entity='client.admin' cmd={"prefix": "osd numa-status", "format": "json-pretty"} : dispatch

● ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.0.service - Ceph osd.0 for 00fdae1b-7fad-5f1b-8734-ba4d9298a6de
     Loaded: loaded (/etc/systemd/system/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-12-13 07:14:12 UTC; 22min ago
   Main PID: 85136 (conmon)
         IO: 6.5M read, 1.2G written
      Tasks: 61 (limit: 48568)
     Memory: 484.8M (peak: 513.4M)
        CPU: 5.794s
     CGroup: /system.slice/system-ceph\x2d00fdae1b\x2d7fad\x2d5f1b\x2d8734\x2dba4d9298a6de.slice/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.0.service
             ├─libpod-payload-5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7
             │ ├─85138 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ └─85140 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             └─runtime
               └─85136 /usr/bin/conmon --api-version 1 -c 5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7 -u 5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7/userdata -p /run/containers/storage/overlay-containers/5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7

Dec 13 07:36:33 compute-0 ceph-osd[85140]: monclient: tick
Dec 13 07:36:33 compute-0 ceph-osd[85140]: monclient: _check_auth_tickets
Dec 13 07:36:33 compute-0 ceph-osd[85140]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-12-13T07:36:01.685190+0000)
Dec 13 07:36:33 compute-0 ceph-osd[85140]: prioritycache tune_memory target: 4294967296 mapped: 75710464 unmapped: 1794048 heap: 77504512 old mem: 2845415832 new mem: 2845415832
Dec 13 07:36:33 compute-0 ceph-osd[85140]: monclient: tick
Dec 13 07:36:33 compute-0 ceph-osd[85140]: monclient: _check_auth_tickets
Dec 13 07:36:33 compute-0 ceph-osd[85140]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-12-13T07:36:02.685298+0000)
Dec 13 07:36:33 compute-0 ceph-osd[85140]: prioritycache tune_memory target: 4294967296 mapped: 75833344 unmapped: 1671168 heap: 77504512 old mem: 2845415832 new mem: 2845415832
Dec 13 07:36:33 compute-0 ceph-osd[85140]: osd.0 115 heartbeat osd_stat(store_statfs(0x4fcec9000/0x0/0x4ffc00000, data 0xabb5e/0x163000, compress 0x0/0x0/0x0, omap 0x108ad, meta 0x2bbf753), peers [1,2] op hist [])
Dec 13 07:36:33 compute-0 ceph-osd[85140]: do_command 'log dump' '{prefix=log dump}'

● ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.1.service - Ceph osd.1 for 00fdae1b-7fad-5f1b-8734-ba4d9298a6de
     Loaded: loaded (/etc/systemd/system/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-12-13 07:14:15 UTC; 22min ago
   Main PID: 86138 (conmon)
         IO: 6.6M read, 1.2G written
      Tasks: 61 (limit: 48568)
     Memory: 421.4M (peak: 521.4M)
        CPU: 6.278s
     CGroup: /system.slice/system-ceph\x2d00fdae1b\x2d7fad\x2d5f1b\x2d8734\x2dba4d9298a6de.slice/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.1.service
             ├─libpod-payload-c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03
             │ ├─86140 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ └─86142 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             └─runtime
               └─86138 /usr/bin/conmon --api-version 1 -c c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03 -u c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03/userdata -p /run/containers/storage/overlay-containers/c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03

Dec 13 07:36:29 compute-0 ceph-osd[86142]: monclient: _check_auth_tickets
Dec 13 07:36:29 compute-0 ceph-osd[86142]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-12-13T07:35:57.848664+0000)
Dec 13 07:36:29 compute-0 ceph-osd[86142]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Dec 13 07:36:29 compute-0 ceph-osd[86142]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Dec 13 07:36:29 compute-0 ceph-osd[86142]: prioritycache tune_memory target: 4294967296 mapped: 82083840 unmapped: 1794048 heap: 83877888 old mem: 2845415832 new mem: 2845415832
Dec 13 07:36:29 compute-0 ceph-osd[86142]: bluestore.MempoolThread _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 993721 data_alloc: 218103808 data_used: 7527
Dec 13 07:36:29 compute-0 ceph-osd[86142]: monclient: tick
Dec 13 07:36:29 compute-0 ceph-osd[86142]: monclient: _check_auth_tickets
Dec 13 07:36:29 compute-0 ceph-osd[86142]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-12-13T07:35:58.848760+0000)
Dec 13 07:36:29 compute-0 ceph-osd[86142]: do_command 'log dump' '{prefix=log dump}'

● ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.2.service - Ceph osd.2 for 00fdae1b-7fad-5f1b-8734-ba4d9298a6de
     Loaded: loaded (/etc/systemd/system/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-12-13 07:14:18 UTC; 22min ago
   Main PID: 87151 (conmon)
         IO: 6.4M read, 1.2G written
      Tasks: 61 (limit: 48568)
     Memory: 452.0M (peak: 516.3M)
        CPU: 5.611s
     CGroup: /system.slice/system-ceph\x2d00fdae1b\x2d7fad\x2d5f1b\x2d8734\x2dba4d9298a6de.slice/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.2.service
             ├─libpod-payload-bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a
             │ ├─87153 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ └─87155 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             └─runtime
               └─87151 /usr/bin/conmon --api-version 1 -c bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a -u bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a/userdata -p /run/containers/storage/overlay-containers/bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a

Dec 13 07:36:25 compute-0 ceph-osd[87155]: monclient: tick
Dec 13 07:36:25 compute-0 ceph-osd[87155]: monclient: _check_auth_tickets
Dec 13 07:36:25 compute-0 ceph-osd[87155]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-12-13T07:35:53.700678+0000)
Dec 13 07:36:25 compute-0 ceph-osd[87155]: osd.2 115 heartbeat osd_stat(store_statfs(0x4fcec6000/0x0/0x4ffc00000, data 0xac77e/0x166000, compress 0x0/0x0/0x0, omap 0x112df, meta 0x2bbed21), peers [0,1] op hist [])
Dec 13 07:36:25 compute-0 ceph-osd[87155]: prioritycache tune_memory target: 4294967296 mapped: 76619776 unmapped: 2015232 heap: 78635008 old mem: 2845415832 new mem: 2845415832
Dec 13 07:36:25 compute-0 ceph-osd[87155]: monclient: tick
Dec 13 07:36:25 compute-0 ceph-osd[87155]: monclient: _check_auth_tickets
Dec 13 07:36:25 compute-0 ceph-osd[87155]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-12-13T07:35:54.700780+0000)
Dec 13 07:36:25 compute-0 ceph-osd[87155]: osd.2 115 heartbeat osd_stat(store_statfs(0x4fcec6000/0x0/0x4ffc00000, data 0xac77e/0x166000, compress 0x0/0x0/0x0, omap 0x112df, meta 0x2bbed21), peers [0,1] op hist [])
Dec 13 07:36:25 compute-0 ceph-osd[87155]: do_command 'log dump' '{prefix=log dump}'

● ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@rgw.rgw.compute-0.kikquh.service - Ceph rgw.rgw.compute-0.kikquh for 00fdae1b-7fad-5f1b-8734-ba4d9298a6de
     Loaded: loaded (/etc/systemd/system/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-12-13 07:14:56 UTC; 21min ago
   Main PID: 93465 (conmon)
         IO: 0B read, 191.0K written
      Tasks: 614 (limit: 48568)
     Memory: 105.5M (peak: 106.0M)
        CPU: 3.828s
     CGroup: /system.slice/system-ceph\x2d00fdae1b\x2d7fad\x2d5f1b\x2d8734\x2dba4d9298a6de.slice/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@rgw.rgw.compute-0.kikquh.service
             ├─libpod-payload-69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c
             │ ├─93474 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.kikquh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
             │ └─93487 /usr/bin/radosgw -n client.rgw.rgw.compute-0.kikquh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
             └─runtime
               └─93465 /usr/bin/conmon --api-version 1 -c 69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c -u 69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c/userdata -p /run/containers/storage/overlay-containers/69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-rgw-rgw-compute-0-kikquh --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@rgw.rgw.compute-0.kikquh.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c

Dec 13 07:14:56 compute-0 radosgw[93487]: framework conf key: endpoint, val: 192.168.122.100:8082
Dec 13 07:14:56 compute-0 radosgw[93487]: init_numa not setting numa affinity
Dec 13 07:15:05 compute-0 radosgw[93487]: v1 topic migration: starting v1 topic migration..
Dec 13 07:15:05 compute-0 radosgw[93487]: v1 topic migration: finished v1 topic migration
Dec 13 07:15:05 compute-0 radosgw[93487]: framework: beast
Dec 13 07:15:05 compute-0 radosgw[93487]: framework conf key: ssl_certificate, val: config://rgw/cert/$realm/$zone.crt
Dec 13 07:15:05 compute-0 radosgw[93487]: framework conf key: ssl_private_key, val: config://rgw/cert/$realm/$zone.key
Dec 13 07:15:05 compute-0 radosgw[93487]: starting handler: beast
Dec 13 07:15:05 compute-0 radosgw[93487]: set uid:gid to 167:167 (ceph:ceph)
Dec 13 07:15:05 compute-0 radosgw[93487]: mgrc service_daemon_register rgw.14256 metadata {arch=x86_64,ceph_release=tentacle,ceph_version=ceph version 20.2.0 (69f84cc2651aa259a15bc192ddaabd3baba07489) tentacle (stable - RelWithDebInfo),ceph_version_short=20.2.0,container_hostname=compute-0,container_image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86,cpu=AMD EPYC 7763 64-Core Processor,distro=centos,distro_description=CentOS Stream 9,distro_version=9,frontend_config#0=beast endpoint=192.168.122.100:8082,frontend_type#0=beast,hostname=compute-0,id=rgw.compute-0.kikquh,kernel_description=#1 SMP PREEMPT_DYNAMIC Fri Dec 5 11:18:23 UTC 2025,kernel_version=5.14.0-648.el9.x86_64,mem_swap_kb=1048572,mem_total_kb=7865356,num_handles=1,os=Linux,pid=2,realm_id=,realm_name=,zone_id=c41c06c0-96f4-44f4-8e75-f5ee0f887dbd,zone_name=default,zonegroup_id=3619564b-3f09-447d-be0a-4c55dcbaaf7a,zonegroup_name=default}

● ceph-osd-losetup-0.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-0.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-12-13 07:12:39 UTC; 24min ago
   Main PID: 72413 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Dec 13 07:12:39 compute-0 systemd[1]: Starting Ceph OSD losetup...
Dec 13 07:12:39 compute-0 bash[72414]: /dev/loop3: [64513]:4327953 (/var/lib/ceph-osd-0.img)
Dec 13 07:12:39 compute-0 systemd[1]: Finished Ceph OSD losetup.

● ceph-osd-losetup-1.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-1.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-12-13 07:12:42 UTC; 24min ago
   Main PID: 72780 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Dec 13 07:12:42 compute-0 systemd[1]: Starting Ceph OSD losetup...
Dec 13 07:12:42 compute-0 bash[72781]: /dev/loop4: [64513]:4327955 (/var/lib/ceph-osd-1.img)
Dec 13 07:12:42 compute-0 systemd[1]: Finished Ceph OSD losetup.

● ceph-osd-losetup-2.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-2.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-12-13 07:12:45 UTC; 24min ago
   Main PID: 73147 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Dec 13 07:12:45 compute-0 systemd[1]: Starting Ceph OSD losetup...
Dec 13 07:12:45 compute-0 bash[73148]: /dev/loop5: [64513]:4327967 (/var/lib/ceph-osd-2.img)
Dec 13 07:12:45 compute-0 systemd[1]: Finished Ceph OSD losetup.

● chronyd.service - NTP client/server
     Loaded: loaded (/usr/lib/systemd/system/chronyd.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-12-13 07:11:05 UTC; 25min ago
       Docs: man:chronyd(8)
             man:chrony.conf(5)
   Main PID: 58459 (chronyd)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48568)
     Memory: 1016.0K (peak: 1.9M)
        CPU: 44ms
     CGroup: /system.slice/chronyd.service
             └─58459 /usr/sbin/chronyd -F 2

Dec 13 07:11:05 compute-0 systemd[1]: Starting NTP client/server...
Dec 13 07:11:05 compute-0 chronyd[58459]: chronyd version 4.8 starting (+CMDMON +REFCLOCK +RTC +PRIVDROP +SCFILTER +SIGND +NTS +SECHASH +IPV6 +DEBUG)
Dec 13 07:11:05 compute-0 chronyd[58459]: Frequency -4.566 +/- 0.327 ppm read from /var/lib/chrony/drift
Dec 13 07:11:05 compute-0 chronyd[58459]: Loaded seccomp filter (level 2)
Dec 13 07:11:05 compute-0 systemd[1]: Started NTP client/server.
Dec 13 07:13:16 compute-0 chronyd[58459]: Selected source 141.11.228.173 (pool.ntp.org)

● cloud-config.service - Cloud-init: Config Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-12-13 06:42:17 UTC; 54min ago
   Main PID: 959 (code=exited, status=0/SUCCESS)
        CPU: 338ms

Dec 13 06:42:16 np0005558317 systemd[1]: Starting Cloud-init: Config Stage...
Dec 13 06:42:17 np0005558317 cloud-init[1112]: Cloud-init v. 24.4-7.el9 running 'modules:config' at Sat, 13 Dec 2025 06:42:17 +0000. Up 10.77 seconds.
Dec 13 06:42:17 np0005558317 systemd[1]: Finished Cloud-init: Config Stage.

● cloud-final.service - Cloud-init: Final Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-final.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-12-13 06:42:17 UTC; 54min ago
   Main PID: 1158 (code=exited, status=0/SUCCESS)
        CPU: 383ms

Dec 13 06:42:17 np0005558317 cloud-init[1258]: Cloud-init v. 24.4-7.el9 running 'modules:final' at Sat, 13 Dec 2025 06:42:17 +0000. Up 11.10 seconds.
Dec 13 06:42:17 np0005558317 cloud-init[1288]: #############################################################
Dec 13 06:42:17 np0005558317 cloud-init[1291]: -----BEGIN SSH HOST KEY FINGERPRINTS-----
Dec 13 06:42:17 np0005558317 cloud-init[1300]: 256 SHA256:ZhJNiM3whCkPRB3WQX/q5XpxOzDsefXdlDO7VjNeKk0 root@np0005558317 (ECDSA)
Dec 13 06:42:17 np0005558317 cloud-init[1308]: 256 SHA256:CGZApeQZ/l1aGDFrHyDPsKEwVnJrfSUR1oPeef/8Nyg root@np0005558317 (ED25519)
Dec 13 06:42:17 np0005558317 cloud-init[1312]: 3072 SHA256:GSQgTqH8TLCYUmB1S+je0NSUnMG2hLMJ9cqychnvHnQ root@np0005558317 (RSA)
Dec 13 06:42:17 np0005558317 cloud-init[1316]: -----END SSH HOST KEY FINGERPRINTS-----
Dec 13 06:42:17 np0005558317 cloud-init[1318]: #############################################################
Dec 13 06:42:17 np0005558317 cloud-init[1258]: Cloud-init v. 24.4-7.el9 finished at Sat, 13 Dec 2025 06:42:17 +0000. Datasource DataSourceConfigDrive [net,ver=2][source=/dev/sr0].  Up 11.23 seconds
Dec 13 06:42:17 np0005558317 systemd[1]: Finished Cloud-init: Final Stage.

● cloud-init-local.service - Cloud-init: Local Stage (pre-network)
     Loaded: loaded (/usr/lib/systemd/system/cloud-init-local.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-12-13 06:42:11 UTC; 54min ago
   Main PID: 737 (code=exited, status=0/SUCCESS)
        CPU: 594ms

Dec 13 06:42:10 localhost systemd[1]: Starting Cloud-init: Local Stage (pre-network)...
Dec 13 06:42:11 localhost cloud-init[794]: Cloud-init v. 24.4-7.el9 running 'init-local' at Sat, 13 Dec 2025 06:42:11 +0000. Up 4.93 seconds.
Dec 13 06:42:11 np0005558317 systemd[1]: Finished Cloud-init: Local Stage (pre-network).

● cloud-init.service - Cloud-init: Network Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-12-13 06:42:16 UTC; 54min ago
   Main PID: 860 (code=exited, status=0/SUCCESS)
        CPU: 1.001s

Dec 13 06:42:16 np0005558317 cloud-init[878]: |.*o*oB.Boo       |
Dec 13 06:42:16 np0005558317 cloud-init[878]: |  *o=.B.= o      |
Dec 13 06:42:16 np0005558317 cloud-init[878]: |  .+ +.B + .     |
Dec 13 06:42:16 np0005558317 cloud-init[878]: |    . + S . .    |
Dec 13 06:42:16 np0005558317 cloud-init[878]: |             .   |
Dec 13 06:42:16 np0005558317 cloud-init[878]: |              +  |
Dec 13 06:42:16 np0005558317 cloud-init[878]: |           E . +.|
Dec 13 06:42:16 np0005558317 cloud-init[878]: |            .   =|
Dec 13 06:42:16 np0005558317 cloud-init[878]: +----[SHA256]-----+
Dec 13 06:42:16 np0005558317 systemd[1]: Finished Cloud-init: Network Stage.

○ cpupower.service - Configure CPU power related settings
     Loaded: loaded (/usr/lib/systemd/system/cpupower.service; disabled; preset: disabled)
     Active: inactive (dead)

● crond.service - Command Scheduler
     Loaded: loaded (/usr/lib/systemd/system/crond.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-12-13 06:42:16 UTC; 54min ago
   Main PID: 968 (crond)
         IO: 160.0K read, 4.0K written
      Tasks: 2 (limit: 48568)
     Memory: 1.5M (peak: 5.1M)
        CPU: 58ms
     CGroup: /system.slice/crond.service
             ├─  968 /usr/sbin/crond -n
             └─30916 /usr/sbin/anacron -s

Dec 13 07:01:01 compute-0 CROND[30905]: (root) CMD (run-parts /etc/cron.hourly)
Dec 13 07:01:01 compute-0 run-parts[30908]: (/etc/cron.hourly) starting 0anacron
Dec 13 07:01:01 compute-0 anacron[30916]: Anacron started on 2025-12-13
Dec 13 07:01:02 compute-0 anacron[30916]: Will run job `cron.daily' in 28 min.
Dec 13 07:01:02 compute-0 anacron[30916]: Will run job `cron.weekly' in 48 min.
Dec 13 07:01:02 compute-0 anacron[30916]: Will run job `cron.monthly' in 68 min.
Dec 13 07:01:02 compute-0 anacron[30916]: Jobs will be executed sequentially
Dec 13 07:01:02 compute-0 run-parts[30918]: (/etc/cron.hourly) finished 0anacron
Dec 13 07:29:01 compute-0 anacron[30916]: Job `cron.daily' started
Dec 13 07:29:01 compute-0 anacron[30916]: Job `cron.daily' terminated

○ d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed-304b4c4e52c8cee3.service - /usr/bin/podman healthcheck run d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed
     Loaded: loaded (/run/systemd/transient/d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed-304b4c4e52c8cee3.service; transient)
  Transient: yes
     Active: inactive (dead) since Sat 2025-12-13 07:36:37 UTC; 13s ago
   Duration: 199ms
TriggeredBy: ● d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed-304b4c4e52c8cee3.timer
    Process: 250063 ExecStart=/usr/bin/podman healthcheck run d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed (code=exited, status=0/SUCCESS)
   Main PID: 250063 (code=exited, status=0/SUCCESS)
        CPU: 53ms

Unit display-manager.service could not be found.
Dec 13 07:36:37 compute-0 podman[250063]: 2025-12-13 07:36:37.920364168 +0000 UTC m=+0.179607024 container health_status d4b07d1867f144077f7c5c
5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed (image=quay.io/podified-antelope-centos9/openstack-ovn-controller@sha256:fa24ce4aa285e3632c86a53e8d0385d4c788d049da42dd06570ad9d44aae00de, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_controller, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, container_name=ovn_controller, managed_by=edpm_ansible, org.label-schema.build-date=20251202, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller@sha256:fa24ce4aa285e3632c86a53e8d0385d4c788d049da42dd06570ad9d44aae00de', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)

● dbus-broker.service - D-Bus System Message Bus
     Loaded: loaded (/usr/lib/systemd/system/dbus-broker.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-12-13 06:42:10 UTC; 54min ago
TriggeredBy: ● dbus.socket
       Docs: man:dbus-broker-launch(1)
   Main PID: 727 (dbus-broker-lau)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48568)
     Memory: 2.9M (peak: 3.6M)
        CPU: 3.683s
     CGroup: /system.slice/dbus-broker.service
             ├─727 /usr/bin/dbus-broker-launch --scope system --audit
             └─734 dbus-broker --log 4 --controller 9 --machine-id 64f1d6692049d8be5e8b216cc203502c --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit

Dec 13 07:09:07 compute-0 dbus-broker-launch[727]: Noticed file-system modification, trigger reload.
Dec 13 07:09:07 compute-0 dbus-broker-launch[727]: Noticed file-system modification, trigger reload.
Dec 13 07:09:44 compute-0 dbus-broker-launch[734]: avc:  op=load_policy lsm=selinux seqno=9 res=1
Dec 13 07:09:51 compute-0 dbus-broker-launch[734]: avc:  op=load_policy lsm=selinux seqno=10 res=1
Dec 13 07:22:04 compute-0 dbus-broker-launch[734]: avc:  op=load_policy lsm=selinux seqno=11 res=1
Dec 13 07:25:11 compute-0 dbus-broker-launch[734]: avc:  op=load_policy lsm=selinux seqno=13 res=1
Dec 13 07:25:37 compute-0 dbus-broker-launch[727]: Noticed file-system modification, trigger reload.
Dec 13 07:25:37 compute-0 dbus-broker-launch[734]: avc:  op=load_policy lsm=selinux seqno=14 res=1
Dec 13 07:25:37 compute-0 dbus-broker-launch[727]: Noticed file-system modification, trigger reload.
Dec 13 07:26:35 compute-0 dbus-broker-launch[734]: avc:  op=load_policy lsm=selinux seqno=15 res=1

○ dm-event.service - Device-mapper event daemon
     Loaded: loaded (/usr/lib/systemd/system/dm-event.service; static)
     Active: inactive (dead)
TriggeredBy: ● dm-event.socket
       Docs: man:dmeventd(8)

○ dnf-makecache.service - dnf makecache
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.service; static)
     Active: inactive (dead) since Sat 2025-12-13 06:52:21 UTC; 44min ago
TriggeredBy: ● dnf-makecache.timer
   Main PID: 29973 (code=exited, status=0/SUCCESS)
        CPU: 548ms

Dec 13 06:52:15 compute-0 systemd[1]: Starting dnf makecache...
Dec 13 06:52:15 compute-0 dnf[29973]: Failed determining last makecache time.
Dec 13 06:52:17 compute-0 dnf[29973]: CentOS Stream 9 - BaseOS                        5.1 kB/s | 7.3 kB     00:01
Dec 13 06:52:19 compute-0 dnf[29973]: CentOS Stream 9 - AppStream                     3.4 kB/s | 7.8 kB     00:02
Dec 13 06:52:20 compute-0 dnf[29973]: CentOS Stream 9 - CRB                            17 kB/s | 7.2 kB     00:00
Dec 13 06:52:21 compute-0 dnf[29973]: CentOS Stream 9 - Extras packages               8.3 kB/s | 8.3 kB     00:00
Dec 13 06:52:21 compute-0 dnf[29973]: Metadata cache created.
Dec 13 06:52:21 compute-0 systemd[1]: dnf-makecache.service: Deactivated successfully.
Dec 13 06:52:21 compute-0 systemd[1]: Finished dnf makecache.

○ dracut-cmdline.service - dracut cmdline hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-cmdline.service; static)
     Active: inactive (dead) since Sat 2025-12-13 06:42:09 UTC; 54min ago
   Duration: 1.426s
       Docs: man:dracut-cmdline.service(8)
             man:dracut.bootup(7)
   Main PID: 296 (code=exited, status=0/SUCCESS)
        CPU: 99ms

Dec 13 06:42:08 localhost systemd[1]: Starting dracut cmdline hook...
Dec 13 06:42:08 localhost dracut-cmdline[296]: dracut-9 dracut-057-102.git20250818.el9
Dec 13 06:42:08 localhost dracut-cmdline[296]: Using kernel command line parameters:    BOOT_IMAGE=(hd0,msdos1)/boot/vmlinuz-5.14.0-648.el9.x86_64 root=UUID=cbdedf45-ed1d-4952-82a8-33a12c0ba266 ro console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=1G-2G:192M,2G-64G:256M,64G-:512M
Dec 13 06:42:08 localhost systemd[1]: Finished dracut cmdline hook.
Dec 13 06:42:09 localhost systemd[1]: dracut-cmdline.service: Deactivated successfully.
Dec 13 06:42:09 localhost systemd[1]: Stopped dracut cmdline hook.

○ dracut-initqueue.service - dracut initqueue hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-initqueue.service; static)
     Active: inactive (dead) since Sat 2025-12-13 06:42:09 UTC; 54min ago
   Duration: 579ms
       Docs: man:dracut-initqueue.service(8)
             man:dracut.bootup(7)
   Main PID: 462 (code=exited, status=0/SUCCESS)
        CPU: 22ms

Dec 13 06:42:08 localhost systemd[1]: Starting dracut initqueue hook...
Dec 13 06:42:09 localhost systemd[1]: Finished dracut initqueue hook.
Dec 13 06:42:09 localhost systemd[1]: dracut-initqueue.service: Deactivated successfully.
Dec 13 06:42:09 localhost systemd[1]: Stopped dracut initqueue hook.

○ dracut-mount.service - dracut mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-mount.service; static)
     Active: inactive (dead) since Sat 2025-12-13 06:42:09 UTC; 54min ago
   Duration: 98ms
       Docs: man:dracut-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 540 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Dec 13 06:42:09 localhost systemd[1]: Starting dracut mount hook...
Dec 13 06:42:09 localhost systemd[1]: Finished dracut mount hook.
Dec 13 06:42:09 localhost systemd[1]: dracut-mount.service: Deactivated successfully.
Dec 13 06:42:09 localhost systemd[1]: Stopped dracut mount hook.

○ dracut-pre-mount.service - dracut pre-mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-mount.service; static)
     Active: inactive (dead) since Sat 2025-12-13 06:42:09 UTC; 54min ago
   Duration: 552ms
       Docs: man:dracut-pre-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 518 (code=exited, status=0/SUCCESS)
        CPU: 6ms

Dec 13 06:42:09 localhost systemd[1]: Starting dracut pre-mount hook...
Dec 13 06:42:09 localhost systemd[1]: Finished dracut pre-mount hook.
Dec 13 06:42:09 localhost systemd[1]: dracut-pre-mount.service: Deactivated successfully.
Dec 13 06:42:09 localhost systemd[1]: Stopped dracut pre-mount hook.

○ dracut-pre-pivot.service - dracut pre-pivot and cleanup hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-pivot.service; static)
     Active: inactive (dead) since Sat 2025-12-13 06:42:09 UTC; 54min ago
   Duration: 22ms
       Docs: man:dracut-pre-pivot.service(8)
             man:dracut.bootup(7)
   Main PID: 546 (code=exited, status=0/SUCCESS)
        CPU: 61ms

Dec 13 06:42:09 localhost systemd[1]: Starting dracut pre-pivot and cleanup hook...
Dec 13 06:42:09 localhost systemd[1]: Finished dracut pre-pivot and cleanup hook.
Dec 13 06:42:09 localhost systemd[1]: dracut-pre-pivot.service: Deactivated successfully.
Dec 13 06:42:09 localhost systemd[1]: Stopped dracut pre-pivot and cleanup hook.

○ dracut-pre-trigger.service - dracut pre-trigger hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-trigger.service; static)
     Active: inactive (dead) since Sat 2025-12-13 06:42:09 UTC; 54min ago
   Duration: 1.165s
       Docs: man:dracut-pre-trigger.service(8)
             man:dracut.bootup(7)
   Main PID: 435 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Dec 13 06:42:08 localhost systemd[1]: Starting dracut pre-trigger hook...
Dec 13 06:42:08 localhost systemd[1]: Finished dracut pre-trigger hook.
Dec 13 06:42:09 localhost systemd[1]: dracut-pre-trigger.service: Deactivated successfully.
Dec 13 06:42:09 localhost systemd[1]: Stopped dracut pre-trigger hook.

○ dracut-pre-udev.service - dracut pre-udev hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-udev.service; static)
     Active: inactive (dead) since Sat 2025-12-13 06:42:09 UTC; 54min ago
   Duration: 1.246s
       Docs: man:dracut-pre-udev.service(8)
             man:dracut.bootup(7)
   Main PID: 385 (code=exited, status=0/SUCCESS)
        CPU: 176ms

Dec 13 06:42:08 localhost systemd[1]: Starting dracut pre-udev hook...
Dec 13 06:42:08 localhost rpc.statd[412]: Version 2.5.4 starting
Dec 13 06:42:08 localhost rpc.statd[412]: Initializing NSM state
Dec 13 06:42:08 localhost rpc.idmapd[417]: Setting log level to 0
Dec 13 06:42:08 localhost systemd[1]: Finished dracut pre-udev hook.
Dec 13 06:42:09 localhost rpc.idmapd[417]: exiting on signal 15
Dec 13 06:42:09 localhost systemd[1]: dracut-pre-udev.service: Deactivated successfully.
Dec 13 06:42:09 localhost systemd[1]: Stopped dracut pre-udev hook.

○ dracut-shutdown-onfailure.service - Service executing upon dracut-shutdown failure to perform cleanup
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown-onfailure.service; static)
     Active: inactive (dead)
       Docs: man:dracut-shutdown.service(8)

● dracut-shutdown.service - Restore /run/initramfs on shutdown
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown.service; static)
     Active: active (exited) since Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:dracut-shutdown.service(8)
   Main PID: 738 (code=exited, status=0/SUCCESS)
        CPU: 2ms

Dec 13 06:42:10 localhost systemd[1]: Starting Restore /run/initramfs on shutdown...
Dec 13 06:42:10 localhost systemd[1]: Finished Restore /run/initramfs on shutdown.

● edpm-container-shutdown.service - EDPM Container Shutdown
     Loaded: loaded (/etc/systemd/system/edpm-container-shutdown.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2025-12-13 07:11:27 UTC; 25min ago
       Docs: https://github.com/openstack-k8s-operators/docs
   Main PID: 61453 (code=exited, status=0/SUCCESS)
        CPU: 2ms

Dec 13 07:11:27 compute-0 systemd[1]: Starting EDPM Container Shutdown...
Dec 13 07:11:27 compute-0 systemd[1]: Finished EDPM Container Shutdown.

○ edpm_libvirt_guests.service - Suspend libvirt Guests in edpm
     Loaded: loaded (/etc/systemd/system/edpm_libvirt_guests.service; enabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:libvirtd(8)
             https://libvirt.org

● edpm_multipathd.service - multipathd container
     Loaded: loaded (/etc/systemd/system/edpm_multipathd.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-12-13 07:28:04 UTC; 8min ago
   Main PID: 223786 (conmon)
         IO: 0B read, 111.5K written
      Tasks: 1 (limit: 48568)
     Memory: 668.0K (peak: 18.6M)
        CPU: 76ms
     CGroup: /system.slice/edpm_multipathd.service
             └─223786 /usr/bin/conmon --api-version 1 -c f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6 -u f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6/userdata -p /run/containers/storage/overlay-containers/f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6/userdata/pidfile -n multipathd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6/userdata/oci-log --conmon-pidfile /run/multipathd.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only 
--exit-command-arg f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6

Dec 13 07:28:04 compute-0 multipathd[223786]: + sudo kolla_copy_cacerts
Dec 13 07:28:04 compute-0 multipathd[223786]: + [[ ! -n '' ]]
Dec 13 07:28:04 compute-0 multipathd[223786]: + . kolla_extend_start
Dec 13 07:28:04 compute-0 multipathd[223786]: Running command: '/usr/sbin/multipathd -d'
Dec 13 07:28:04 compute-0 multipathd[223786]: + echo 'Running command: '\''/usr/sbin/multipathd -d'\'''
Dec 13 07:28:04 compute-0 multipathd[223786]: + umask 0022
Dec 13 07:28:04 compute-0 multipathd[223786]: + exec /usr/sbin/multipathd -d
Dec 13 07:28:04 compute-0 multipathd[223786]: 2758.075287 | --------start up--------
Dec 13 07:28:04 compute-0 multipathd[223786]: 2758.075300 | read /etc/multipath.conf
Dec 13 07:28:04 compute-0 multipathd[223786]: 2758.079325 | path checkers start up

● edpm_nova_compute.service - nova_compute container
     Loaded: loaded (/etc/systemd/system/edpm_nova_compute.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-12-13 07:29:17 UTC; 7min ago
    Process: 241209 ExecStart=/usr/bin/podman start nova_compute (code=exited, status=0/SUCCESS)
   Main PID: 241222 (conmon)
         IO: 0B read, 91.5K written
      Tasks: 1 (limit: 48568)
     Memory: 680.0K (peak: 18.2M)
        CPU: 151ms
     CGroup: /system.slice/edpm_nova_compute.service
             └─241222 /usr/bin/conmon --api-version 1 -c 85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306 -u 85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306/userdata -p /run/containers/storage/overlay-containers/85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306

Dec 13 07:36:26 compute-0 nova_compute[241222]: 2025-12-13 07:36:26.563 241226 DEBUG oslo_service.periodic_task [None req-d86864b1-4a5a-40d1-837d-f2d512596900 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Dec 13 07:36:26 compute-0 nova_compute[241222]: 2025-12-13 07:36:26.567 241226 DEBUG oslo_service.periodic_task [None req-d86864b1-4a5a-40d1-837d-f2d512596900 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Dec 13 07:36:26 compute-0 nova_compute[241222]: 2025-12-13 07:36:26.567 241226 DEBUG nova.compute.manager [None req-d86864b1-4a5a-40d1-837d-f2d512596900 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858[00m
Dec 13 07:36:26 compute-0 nova_compute[241222]: 2025-12-13 07:36:26.567 241226 DEBUG nova.compute.manager [None req-d86864b1-4a5a-40d1-837d-f2d512596900 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862[00m
Dec 13 07:36:26 compute-0 nova_compute[241222]: 2025-12-13 07:36:26.581 241226 DEBUG nova.compute.manager [None req-d86864b1-4a5a-40d1-837d-f2d512596900 - - - - - -] Didn't find any instances for network info cache update. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9944[00m
Dec 13 07:36:26 compute-0 nova_compute[241222]: 2025-12-13 07:36:26.581 241226 DEBUG oslo_service.periodic_task [None req-d86864b1-4a5a-40d1-837d-f2d512596900 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Dec 13 07:36:26 compute-0 nova_compute[241222]: 2025-12-13 07:36:26.581 241226 DEBUG nova.compute.manager [None req-d86864b1-4a5a-40d1-837d-f2d512596900 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477[00m
Dec 13 07:36:27 compute-0 nova_compute[241222]: 2025-12-13 07:36:27.568 241226 DEBUG oslo_service.periodic_task [None req-d86864b1-4a5a-40d1-837d-f2d512596900 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Dec 13 07:36:27 compute-0 nova_compute[241222]: 2025-12-13 07:36:27.568 241226 DEBUG oslo_service.periodic_task [None req-d86864b1-4a5a-40d1-837d-f2d512596900 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Dec 13 07:36:27 compute-0 nova_compute[241222]: 2025-12-13 07:36:27.568 241226 DEBUG oslo_service.periodic_task [None req-d86864b1-4a5a-40d1-837d-f2d512596900 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m

● edpm_ovn_controller.service - ovn_controller container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_controller.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-12-13 07:22:49 UTC; 14min ago
   Main PID: 144647 (conmon)
         IO: 0B read, 164.0K written
      Tasks: 1 (limit: 48568)
     Memory: 696.0K (peak: 19.1M)
        CPU: 188ms
     CGroup: /system.slice/edpm_ovn_controller.service
             └─144647 /usr/bin/conmon --api-version 1 -c d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed -u d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed/userdata -p /run/containers/storage/overlay-containers/d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed

Dec 13 07:22:49 compute-0 ovn_controller[144647]: 2025-12-13T07:22:49Z|00023|features|INFO|OVS DB schema supports 4 flow table prefixes, our IDL supports: 4
Dec 13 07:22:49 compute-0 ovn_controller[144647]: 2025-12-13T07:22:49Z|00024|main|INFO|Setting flow table prefixes: ip_src, ip_dst, ipv6_src, ipv6_dst.
Dec 13 07:22:49 compute-0 ovn_controller[144647]: 2025-12-13T07:22:49Z|00001|pinctrl(ovn_pinctrl0)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connecting to switch
Dec 13 07:22:49 compute-0 ovn_controller[144647]: 2025-12-13T07:22:49Z|00002|rconn(ovn_pinctrl0)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connecting...
Dec 13 07:22:49 compute-0 ovn_controller[144647]: 2025-12-13T07:22:49Z|00001|statctrl(ovn_statctrl3)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connecting to switch
Dec 13 07:22:49 compute-0 ovn_controller[144647]: 2025-12-13T07:22:49Z|00002|rconn(ovn_statctrl3)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connecting...
Dec 13 07:22:49 compute-0 ovn_controller[144647]: 2025-12-13T07:22:49Z|00003|rconn(ovn_pinctrl0)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connected
Dec 13 07:22:49 compute-0 ovn_controller[144647]: 2025-12-13T07:22:49Z|00003|rconn(ovn_statctrl3)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connected
Dec 13 07:23:19 compute-0 ovn_controller[144647]: 2025-12-13T07:23:19Z|00025|memory|INFO|17280 kB peak resident set size after 29.7 seconds
Dec 13 07:23:19 compute-0 ovn_controller[144647]: 2025-12-13T07:23:19Z|00026|memory|INFO|idl-cells-OVN_Southbound:239 idl-cells-Open_vSwitch:528 ofctrl_desired_flow_usage-KB:5 ofctrl_installed_flow_usage-KB:4 ofctrl_sb_flow_ref_usage-KB:2

● edpm_ovn_metadata_agent.service - ovn_metadata_agent container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_metadata_agent.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-12-13 07:23:40 UTC; 13min ago
   Main PID: 154116 (conmon)
         IO: 0B read, 126.5K written
      Tasks: 1 (limit: 48568)
     Memory: 720.0K (peak: 19.1M)
        CPU: 177ms
     CGroup: /system.slice/edpm_ovn_metadata_agent.service
             └─154116 /usr/bin/conmon --api-version 1 -c 1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07 -u 1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07/userdata -p /run/containers/storage/overlay-containers/1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07

Dec 13 07:33:41 compute-0 ovn_metadata_agent[154116]: 2025-12-13 07:33:41.643 154121 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Dec 13 07:34:41 compute-0 ovn_metadata_agent[154116]: 2025-12-13 07:34:41.644 154121 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Dec 13 07:34:41 compute-0 ovn_metadata_agent[154116]: 2025-12-13 07:34:41.644 154121 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Dec 13 07:34:41 compute-0 ovn_metadata_agent[154116]: 2025-12-13 07:34:41.644 154121 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Dec 13 07:35:41 compute-0 ovn_metadata_agent[154116]: 2025-12-13 07:35:41.645 154121 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Dec 13 07:35:41 compute-0 ovn_metadata_agent[154116]: 2025-12-13 07:35:41.645 154121 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Dec 13 07:35:41 compute-0 ovn_metadata_agent[154116]: 2025-12-13 07:35:41.646 154121 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Dec 13 07:36:41 compute-0 ovn_metadata_agent[154116]: 2025-12-13 07:36:41.646 154121 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Dec 13 07:36:41 compute-0 ovn_metadata_agent[154116]: 2025-12-13 07:36:41.647 154121 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Dec 13 07:36:41 compute-0 ovn_metadata_agent[154116]: 2025-12-13 07:36:41.647 154121 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m

○ emergency.service - Emergency Shell
     Loaded: loaded (/usr/lib/systemd/system/emergency.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

○ f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6-407c9b11ead522c2.service - /usr/bin/podman healthcheck run f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6
     Loaded: loaded (/run/systemd/transient/f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6-407c9b11ead522c2.service; transient)
  Transient: yes
     Active: inactive (dead) since Sat 2025-12-13 07:36:48 UTC; 2s ago
   Duration: 73ms
TriggeredBy: ● f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6-407c9b11ead522c2.timer
    Process: 252940 ExecStart=/usr/bin/podman healthcheck run f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6 (code=exited, status=0/SUCCESS)
   Main PID: 252940 (code=exited, status=0/SUCCESS)
        CPU: 57ms

Dec 13 07:36:48 compute-0 podman[252940]: 2025-12-13 07:36:48.720964148 +0000 UTC m=+0.056178043 container health_status f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6 (image=quay.io/podified-antelope-centos9/openstack-multipathd@sha256:df38dbd6b3eccec2abaa8e3618a385405ccec1b73ae8c3573a138b0c961ed31f, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd@sha256:df38dbd6b3eccec2abaa8e3618a385405ccec1b73ae8c3573a138b0c961ed31f', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/c
a-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251202, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, config_id=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_managed=true)
Unit hv_kvp_daemon.service could not be found.

● getty@tty1.service - Getty on tty1
     Loaded: loaded (/usr/lib/systemd/system/getty@.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-12-13 06:42:16 UTC; 54min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 978 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48568)
     Memory: 220.0K (peak: 444.0K)
        CPU: 6ms
     CGroup: /system.slice/system-getty.slice/getty@tty1.service
             └─978 /sbin/agetty -o "-p -- \\u" --noclear - linux

Dec 13 06:42:16 np0005558317 systemd[1]: Started Getty on tty1.

● gssproxy.service - GSSAPI Proxy Daemon
     Loaded: loaded (/usr/lib/systemd/system/gssproxy.service; disabled; preset: disabled)
     Active: active (running) since Sat 2025-12-13 06:42:11 UTC; 54min ago
   Main PID: 827 (gssproxy)
         IO: 0B read, 0B written
      Tasks: 6 (limit: 48568)
     Memory: 1.8M (peak: 2.7M)
        CPU: 11ms
     CGroup: /system.slice/gssproxy.service
             └─827 /usr/sbin/gssproxy -D

Dec 13 06:42:11 np0005558317 systemd[1]: Starting GSSAPI Proxy Daemon...
Dec 13 06:42:11 np0005558317 systemd[1]: Started GSSAPI Proxy Daemon.

○ import-state.service - Import network configuration from initramfs
     Loaded: loaded (/usr/lib/systemd/system/import-state.service; enabled; preset: enabled)
     Active: inactive (dead)

○ initrd-cleanup.service - Cleaning Up and Shutting Down Daemons
     Loaded: loaded (/usr/lib/systemd/system/initrd-cleanup.service; static)
     Active: inactive (dead) since Sat 2025-12-13 06:42:09 UTC; 54min ago
   Main PID: 589 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Dec 13 06:42:09 localhost systemd[1]: Starting Cleaning Up and Shutting Down Daemons...
Dec 13 06:42:09 localhost systemd[1]: initrd-cleanup.service: Deactivated successfully.
Dec 13 06:42:09 localhost systemd[1]: Finished Cleaning Up and Shutting Down Daemons.

○ initrd-parse-etc.service - Mountpoints Configured in the Real Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-parse-etc.service; static)
     Active: inactive (dead) since Sat 2025-12-13 06:42:09 UTC; 54min ago
   Main PID: 539 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Dec 13 06:42:09 localhost systemd[1]: Starting Mountpoints Configured in the Real Root...
Dec 13 06:42:09 localhost systemd[1]: initrd-parse-etc.service: Deactivated successfully.
Dec 13 06:42:09 localhost systemd[1]: Finished Mountpoints Configured in the Real Root.

○ initrd-switch-root.service - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.service; static)
     Active: inactive (dead) since Sat 2025-12-13 06:42:10 UTC; 54min ago
   Main PID: 594 (code=exited, status=0/SUCCESS)
        CPU: 4ms

Dec 13 06:42:09 localhost systemd[1]: Starting Switch Root...

○ initrd-udevadm-cleanup-db.service - Cleanup udev Database
     Loaded: loaded (/usr/lib/systemd/system/initrd-udevadm-cleanup-db.service; static)
     Active: inactive (dead) since Sat 2025-12-13 06:42:09 UTC; 54min ago
   Main PID: 592 (code=exited, status=0/SUCCESS)
        CPU: 6ms

Dec 13 06:42:09 localhost systemd[1]: Starting Cleanup udev Database...
Dec 13 06:42:09 localhost systemd[1]: initrd-udevadm-cleanup-db.service: Deactivated successfully.
Dec 13 06:42:09 localhost systemd[1]: Finished Cleanup udev Database.

○ ip6tables.service - IPv6 firewall with ip6tables
     Loaded: loaded (/usr/lib/systemd/system/ip6tables.service; disabled; preset: disabled)
     Active: inactive (dead)

○ iptables.service - IPv4 firewall with iptables
     Loaded: loaded (/usr/lib/systemd/system/iptables.service; disabled; preset: disabled)
     Active: inactive (dead) since Sat 2025-12-13 07:11:34 UTC; 25min ago
   Duration: 29min 22.777s
   Main PID: 739 (code=exited, status=0/SUCCESS)
        CPU: 64ms

Dec 13 06:42:10 localhost systemd[1]: Starting IPv4 firewall with iptables...
Dec 13 06:42:11 localhost iptables.init[739]: iptables: Applying firewall rules: [  OK  ]
Dec 13 06:42:11 localhost systemd[1]: Finished IPv4 firewall with iptables.
Dec 13 07:11:33 compute-0 systemd[1]: Stopping IPv4 firewall with iptables...
Dec 13 07:11:34 compute-0 iptables.init[62703]: iptables: Setting chains to policy ACCEPT: raw mangle filter nat [  OK  ]
Dec 13 07:11:34 compute-0 iptables.init[62703]: iptables: Flushing firewall rules: [  OK  ]
Dec 13 07:11:34 compute-0 systemd[1]: iptables.service: Deactivated successfully.
Dec 13 07:11:34 compute-0 systemd[1]: Stopped IPv4 firewall with iptables.

● irqbalance.service - irqbalance daemon
     Loaded: loaded (/usr/lib/systemd/system/irqbalance.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:irqbalance(1)
             https://github.com/Irqbalance/irqbalance
   Main PID: 740 (irqbalance)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48568)
     Memory: 1.1M (peak: 1.5M)
        CPU: 192ms
     CGroup: /system.slice/irqbalance.service
             └─740 /usr/sbin/irqbalance

Dec 13 06:42:10 localhost systemd[1]: Started irqbalance daemon.
Dec 13 06:42:21 np0005558317 irqbalance[740]: Cannot change IRQ 45 affinity: Operation not permitted
Dec 13 06:42:21 np0005558317 irqbalance[740]: IRQ 45 affinity is now unmanaged
Dec 13 06:42:21 np0005558317 irqbalance[740]: Cannot change IRQ 48 affinity: Operation not permitted
Dec 13 06:42:21 np0005558317 irqbalance[740]: IRQ 48 affinity is now unmanaged
Dec 13 06:42:21 np0005558317 irqbalance[740]: Cannot change IRQ 46 affinity: Operation not permitted
Dec 13 06:42:21 np0005558317 irqbalance[740]: IRQ 46 affinity is now unmanaged
Dec 13 06:43:01 np0005558317 irqbalance[740]: Cannot change IRQ 47 affinity: Operation not permitted
Dec 13 06:43:01 np0005558317 irqbalance[740]: IRQ 47 affinity is now unmanaged

○ iscsi-init.service - One time configuration for iscsi.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-init.service; disabled; preset: disabled)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-12-13 07:28:12 UTC; 8min ago

Dec 13 07:27:28 compute-0 systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).
Dec 13 07:28:12 compute-0 systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).

○ iscsi-onboot.service - Special handling of early boot iSCSI sessions
     Loaded: loaded (/usr/lib/systemd/system/iscsi-onboot.service; enabled; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsi-shutdown.service - Logout off all iSCSI sessions on shutdown
     Loaded: loaded (/usr/lib/systemd/system/iscsi-shutdown.service; static)
     Active: active (exited) since Sat 2025-12-13 07:27:28 UTC; 9min ago
       Docs: man:iscsid(8)
             man:iscsiadm(8)
   Main PID: 214354 (code=exited, status=0/SUCCESS)
        CPU: 1ms

Dec 13 07:27:28 compute-0 systemd[1]: Starting Logout off all iSCSI sessions on shutdown...
Dec 13 07:27:28 compute-0 systemd[1]: Finished Logout off all iSCSI sessions on shutdown.

○ iscsi-starter.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-starter.service; enabled; preset: disabled)
     Active: inactive (dead)

○ iscsi.service - Login and scanning of iSCSI devices
     Loaded: loaded (/usr/lib/systemd/system/iscsi.service; indirect; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsid.service - Open-iSCSI
     Loaded: loaded (/usr/lib/systemd/system/iscsid.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-12-13 07:28:12 UTC; 8min ago
TriggeredBy: ● iscsid.socket
       Docs: man:iscsid(8)
             man:iscsiuio(8)
             man:iscsiadm(8)
   Main PID: 226492 (iscsid)
     Status: "Ready to process requests"
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48568)
     Memory: 1.9M (peak: 2.0M)
        CPU: 5ms
     CGroup: /system.slice/iscsid.service
             └─226492 /usr/sbin/iscsid -f

Dec 13 07:28:12 compute-0 systemd[1]: Starting Open-iSCSI...
Dec 13 07:28:12 compute-0 systemd[1]: Started Open-iSCSI.

○ iscsiuio.service - iSCSI UserSpace I/O driver
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ iscsiuio.socket
       Docs: man:iscsiuio(8)

● kdump.service - Crash recovery kernel arming
     Loaded: loaded (/usr/lib/systemd/system/kdump.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2025-12-13 06:42:25 UTC; 54min ago
   Main PID: 965 (code=exited, status=0/SUCCESS)
        CPU: 13.403s

Dec 13 06:42:25 np0005558317 dracut[1242]: Linked:         0 files
Dec 13 06:42:25 np0005558317 dracut[1242]: Compared:       0 xattrs
Dec 13 06:42:25 np0005558317 dracut[1242]: Compared:       0 files
Dec 13 06:42:25 np0005558317 dracut[1242]: Saved:          0 B
Dec 13 06:42:25 np0005558317 dracut[1242]: Duration:       0.000367 seconds
Dec 13 06:42:25 np0005558317 dracut[1242]: *** Hardlinking files done ***
Dec 13 06:42:25 np0005558317 dracut[1242]: *** Creating initramfs image file '/boot/initramfs-5.14.0-648.el9.x86_64kdump.img' done ***
Dec 13 06:42:25 np0005558317 kdumpctl[977]: kdump: kexec: loaded kdump kernel
Dec 13 06:42:25 np0005558317 kdumpctl[977]: kdump: Starting kdump: [OK]
Dec 13 06:42:25 np0005558317 systemd[1]: Finished Crash recovery kernel arming.

● kmod-static-nodes.service - Create List of Static Device Nodes
     Loaded: loaded (/usr/lib/systemd/system/kmod-static-nodes.service; static)
     Active: active (exited) since Sat 2025-12-13 06:42:10 UTC; 54min ago
   Main PID: 644 (code=exited, status=0/SUCCESS)
        CPU: 5ms

Dec 13 06:42:10 localhost systemd[1]: Finished Create List of Static Device Nodes.

● ldconfig.service - Rebuild Dynamic Linker Cache
     Loaded: loaded (/usr/lib/systemd/system/ldconfig.service; static)
     Active: active (exited) since Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:ldconfig(8)
   Main PID: 666 (code=exited, status=0/SUCCESS)
        CPU: 46ms

Dec 13 06:42:10 localhost systemd[1]: Starting Rebuild Dynamic Linker Cache...
Dec 13 06:42:10 localhost systemd[1]: Finished Rebuild Dynamic Linker Cache.

○ libvirtd.service
     Loaded: masked (Reason: Unit libvirtd.service is masked.)
     Active: inactive (dead)
TriggeredBy: ○ libvirtd-ro.socket
             ○ libvirtd-admin.socket
             ○ libvirtd.socket

○ loadmodules.service - Load legacy module configuration
     Loaded: loaded (/usr/lib/systemd/system/loadmodules.service; enabled; preset: enabled)
     Active: inactive (dead)

○ logrotate.service - Rotate log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.service; static)
     Active: inactive (dead)
TriggeredBy: ● logrotate.timer
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

○ lvm2-lvmpolld.service - LVM2 poll daemon
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.service; static)
     Active: inactive (dead)
TriggeredBy: ● lvm2-lvmpolld.socket
       Docs: man:lvmpolld(8)

● lvm2-monitor.service - Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling
     Loaded: loaded (/usr/lib/systemd/system/lvm2-monitor.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2025-12-13 07:07:56 UTC; 28min ago
       Docs: man:dmeventd(8)
             man:lvcreate(8)
             man:lvchange(8)
             man:vgchange(8)
   Main PID: 34034 (code=exited, status=0/SUCCESS)
        CPU: 16ms

Dec 13 07:07:56 compute-0 systemd[1]: Starting Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling...
Dec 13 07:07:56 compute-0 systemd[1]: Finished Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling.

○ mdmonitor.service - Software RAID monitoring and management
     Loaded: loaded (/usr/lib/systemd/system/mdmonitor.service; enabled; preset: enabled)
     Active: inactive (dead)

○ microcode.service - Load CPU microcode update
     Loaded: loaded (/usr/lib/systemd/system/microcode.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-12-13 06:42:10 UTC; 54min ago

Dec 13 06:42:10 localhost systemd[1]: Load CPU microcode update was skipped because of an unmet condition check (ConditionPathExists=/sys/devices/system/cpu/microcode/reload).

○ modprobe@configfs.service - Load Kernel Module configfs
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:modprobe(8)
   Main PID: 732 (code=exited, status=0/SUCCESS)
        CPU: 4ms

Dec 13 06:42:10 localhost systemd[1]: Starting Load Kernel Module configfs...
Dec 13 06:42:10 localhost systemd[1]: modprobe@configfs.service: Deactivated successfully.
Dec 13 06:42:10 localhost systemd[1]: Finished Load Kernel Module configfs.

○ modprobe@drm.service - Load Kernel Module drm
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:modprobe(8)
   Main PID: 646 (code=exited, status=0/SUCCESS)
        CPU: 74ms

Dec 13 06:42:10 localhost systemd[1]: modprobe@drm.service: Deactivated successfully.
Dec 13 06:42:10 localhost systemd[1]: Finished Load Kernel Module drm.

○ modprobe@efi_pstore.service - Load Kernel Module efi_pstore
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:modprobe(8)
   Main PID: 647 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Dec 13 06:42:10 localhost systemd[1]: modprobe@efi_pstore.service: Deactivated successfully.
Dec 13 06:42:10 localhost systemd[1]: Finished Load Kernel Module efi_pstore.

○ modprobe@fuse.service - Load Kernel Module fuse
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:modprobe(8)
   Main PID: 648 (code=exited, status=0/SUCCESS)
        CPU: 21ms

Dec 13 06:42:10 localhost systemd[1]: modprobe@fuse.service: Deactivated successfully.
Dec 13 06:42:10 localhost systemd[1]: Finished Load Kernel Module fuse.

○ netns-placeholder.service - Create netns directory
     Loaded: loaded (/etc/systemd/system/netns-placeholder.service; enabled; preset: enabled)
     Active: inactive (dead) since Sat 2025-12-13 07:27:49 UTC; 9min ago
   Main PID: 219880 (code=exited, status=0/SUCCESS)
        CPU: 6ms

Dec 13 07:27:49 compute-0 systemd[1]: Starting Create netns directory...
Dec 13 07:27:49 compute-0 systemd[1]: netns-placeholder.service: Deactivated successfully.
Dec 13 07:27:49 compute-0 systemd[1]: Finished Create netns directory.

○ network.service - LSB: Bring up/down networking
     Loaded: loaded (/etc/rc.d/init.d/network; generated)
     Active: inactive (dead)
       Docs: man:systemd-sysv-generator(8)

● NetworkManager-wait-online.service - Network Manager Wait Online
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager-wait-online.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-12-13 07:10:00 UTC; 26min ago
       Docs: man:NetworkManager-wait-online.service(8)
   Main PID: 48916 (code=exited, status=0/SUCCESS)
        CPU: 16ms

Dec 13 07:09:59 compute-0 systemd[1]: Starting Network Manager Wait Online...
Dec 13 07:10:00 compute-0 systemd[1]: Finished Network Manager Wait Online.

● NetworkManager.service - Network Manager
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager.service; enabled; preset: enabled)
    Drop-In: /usr/lib/systemd/system/NetworkManager.service.d
             └─NetworkManager-ovs.conf
     Active: active (running) since Sat 2025-12-13 07:09:59 UTC; 26min ago
       Docs: man:NetworkManager(8)
   Main PID: 48896 (NetworkManager)
         IO: 104.0K read, 301.0K written
      Tasks: 3 (limit: 48568)
     Memory: 5.4M (peak: 7.1M)
        CPU: 1.418s
     CGroup: /system.slice/NetworkManager.service
             └─48896 /usr/sbin/NetworkManager --no-daemon

Dec 13 07:10:25 compute-0 systemd[1]: Reloaded Network Manager.
Dec 13 07:22:49 compute-0 NetworkManager[48896]: <info>  [1765610569.8944] manager: (br-int): new Open vSwitch Interface device (/org/freedesktop/NetworkManager/Devices/16)
Dec 13 07:22:49 compute-0 NetworkManager[48896]: <info>  [1765610569.8950] device (br-int)[Open vSwitch Interface]: state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external')
Dec 13 07:22:49 compute-0 NetworkManager[48896]: <warn>  [1765610569.8951] device (br-int)[Open vSwitch Interface]: error setting IPv4 forwarding to '1': No such file or directory
Dec 13 07:22:49 compute-0 NetworkManager[48896]: <info>  [1765610569.8959] manager: (br-int): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/17)
Dec 13 07:22:49 compute-0 NetworkManager[48896]: <info>  [1765610569.8964] manager: (br-int): new Open vSwitch Bridge device (/org/freedesktop/NetworkManager/Devices/18)
Dec 13 07:22:49 compute-0 NetworkManager[48896]: <info>  [1765610569.8967] device (br-int)[Open vSwitch Interface]: state change: unavailable -> disconnected (reason 'none', managed-type: 'full')
Dec 13 07:22:49 compute-0 NetworkManager[48896]: <info>  [1765610569.9149] manager: (ovn-d8e85b-0): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/19)
Dec 13 07:22:49 compute-0 NetworkManager[48896]: <info>  [1765610569.9297] device (genev_sys_6081): carrier: link connected
Dec 13 07:22:49 compute-0 NetworkManager[48896]: <info>  [1765610569.9299] manager: (genev_sys_6081): new Generic device (/org/freedesktop/NetworkManager/Devices/20)

○ nfs-idmapd.service - NFSv4 ID-name mapping service
     Loaded: loaded (/usr/lib/systemd/system/nfs-idmapd.service; static)
     Active: inactive (dead)
       Docs: man:idmapd(8)

○ nfs-mountd.service - NFS Mount Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfs-mountd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.mountd(8)

○ nfs-server.service - NFS server and services
     Loaded: loaded (/usr/lib/systemd/system/nfs-server.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:rpc.nfsd(8)
             man:exportfs(8)

○ nfs-utils.service - NFS server and client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-utils.service; static)
     Active: inactive (dead)

○ nfsdcld.service - NFSv4 Client Tracking Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfsdcld.service; static)
     Active: inactive (dead)
       Docs: man:nfsdcld(8)

● nftables.service - Netfilter Tables
     Loaded: loaded (/usr/lib/systemd/system/nftables.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-12-13 07:11:35 UTC; 25min ago
       Docs: man:nft(8)
   Main PID: 63092 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Dec 13 07:11:35 compute-0 systemd[1]: Starting Netfilter Tables...
Dec 13 07:11:35 compute-0 systemd[1]: Finished Netfilter Tables.

Unit ntpd.service could not be found.
Unit ntpdate.service could not be found.
● nis-domainname.service - Read and set NIS domainname from /etc/sysconfig/network
     Loaded: loaded (/usr/lib/systemd/system/nis-domainname.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2025-12-13 06:42:10 UTC; 54min ago
   Main PID: 649 (code=exited, status=0/SUCCESS)
        CPU: 4ms

Dec 13 06:42:10 localhost systemd[1]: Finished Read and set NIS domainname from /etc/sysconfig/network.

○ nvmefc-boot-connections.service - Auto-connect to subsystems on FC-NVME devices found during boot
     Loaded: loaded (/usr/lib/systemd/system/nvmefc-boot-connections.service; enabled; preset: enabled)
     Active: inactive (dead)

● openvswitch.service - Open vSwitch
     Loaded: loaded (/usr/lib/systemd/system/openvswitch.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-12-13 07:09:47 UTC; 27min ago
   Main PID: 47210 (code=exited, status=0/SUCCESS)
        CPU: 1ms

Dec 13 07:09:47 compute-0 systemd[1]: Starting Open vSwitch...
Dec 13 07:09:47 compute-0 systemd[1]: Finished Open vSwitch.

● ovs-delete-transient-ports.service - Open vSwitch Delete Transient Ports
     Loaded: loaded (/usr/lib/systemd/system/ovs-delete-transient-ports.service; static)
     Active: active (exited) since Sat 2025-12-13 07:09:47 UTC; 27min ago
   Main PID: 47148 (code=exited, status=0/SUCCESS)
        CPU: 21ms

Dec 13 07:09:47 compute-0 systemd[1]: Starting Open vSwitch Delete Transient Ports...
Dec 13 07:09:47 compute-0 systemd[1]: Finished Open vSwitch Delete Transient Ports.

● ovs-vswitchd.service - Open vSwitch Forwarding Unit
     Loaded: loaded (/usr/lib/systemd/system/ovs-vswitchd.service; static)
     Active: active (running) since Sat 2025-12-13 07:09:47 UTC; 27min ago
   Main PID: 47201 (ovs-vswitchd)
         IO: 3.4M read, 24.0K written
      Tasks: 8 (limit: 48568)
     Memory: 232.0M (peak: 250.8M)
        CPU: 2.201s
     CGroup: /system.slice/ovs-vswitchd.service
             └─47201 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach

Dec 13 07:09:47 compute-0 systemd[1]: Starting Open vSwitch Forwarding Unit...
Dec 13 07:09:47 compute-0 ovs-ctl[47191]: Inserting openvswitch module [  OK  ]
Dec 13 07:09:47 compute-0 ovs-ctl[47160]: Starting ovs-vswitchd [  OK  ]
Dec 13 07:09:47 compute-0 ovs-ctl[47160]: Enabling remote OVSDB managers [  OK  ]
Dec 13 07:09:47 compute-0 systemd[1]: Started Open vSwitch Forwarding Unit.
Dec 13 07:09:47 compute-0 ovs-vsctl[47209]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0

● ovsdb-server.service - Open vSwitch Database Unit
     Loaded: loaded (/usr/lib/systemd/system/ovsdb-server.service; static)
     Active: active (running) since Sat 2025-12-13 07:09:47 UTC; 27min ago
   Main PID: 47120 (ovsdb-server)
         IO: 1.2M read, 96.5K written
      Tasks: 1 (limit: 48568)
     Memory: 4.7M (peak: 38.7M)
        CPU: 603ms
     CGroup: /system.slice/ovsdb-server.service
             └─47120 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach

Dec 13 07:09:47 compute-0 chown[47067]: /usr/bin/chown: cannot access '/run/openvswitch': No such file or directory
Dec 13 07:09:47 compute-0 ovs-ctl[47072]: /etc/openvswitch/conf.db does not exist ... (warning).
Dec 13 07:09:47 compute-0 ovs-ctl[47072]: Creating empty database /etc/openvswitch/conf.db [  OK  ]
Dec 13 07:09:47 compute-0 ovs-ctl[47072]: Starting ovsdb-server [  OK  ]
Dec 13 07:09:47 compute-0 ovs-vsctl[47121]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait -- init -- set Open_vSwitch . db-version=8.5.1
Unit pacemaker.service could not be found.
Unit plymouth-quit-wait.service could not be found.
Unit plymouth-start.service could not be found.
Unit power-profiles-daemon.service could not be found.
Dec 13 07:09:47 compute-0 ovs-vsctl[47141]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait set Open_vSwitch . ovs-version=3.3.5-115.el9s "external-ids:system-id=\"075cc82e-193d-47f2-a248-9917472f5475\"" "external-ids:rundir=\"/var/run/openvswitch\"" "system-type=\"centos\"" "system-version=\"9\""
Dec 13 07:09:47 compute-0 ovs-ctl[47072]: Configuring Open vSwitch system IDs [  OK  ]
Dec 13 07:09:47 compute-0 ovs-ctl[47072]: Enabling remote OVSDB managers [  OK  ]
Dec 13 07:09:47 compute-0 systemd[1]: Started Open vSwitch Database Unit.
Dec 13 07:09:47 compute-0 ovs-vsctl[47147]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0

● polkit.service - Authorization Manager
     Loaded: loaded (/usr/lib/systemd/system/polkit.service; static)
     Active: active (running) since Sat 2025-12-13 07:09:11 UTC; 27min ago
       Docs: man:polkit(8)
   Main PID: 43388 (polkitd)
         IO: 18.9M read, 0B written
      Tasks: 8 (limit: 48568)
     Memory: 24.6M (peak: 26.0M)
        CPU: 841ms
     CGroup: /system.slice/polkit.service
             └─43388 /usr/lib/polkit-1/polkitd --no-debug

Dec 13 07:25:40 compute-0 polkitd[43388]: Collecting garbage unconditionally...
Dec 13 07:25:40 compute-0 polkitd[43388]: Loading rules from directory /etc/polkit-1/rules.d
Dec 13 07:25:40 compute-0 polkitd[43388]: Loading rules from directory /usr/share/polkit-1/rules.d
Dec 13 07:25:40 compute-0 polkitd[43388]: Finished loading, compiling and executing 3 rules
Dec 13 07:26:47 compute-0 polkitd[43388]: Registered Authentication Agent for unix-process:205916:268078 (system bus name :1.2526 [pkttyagent --process 205916 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Dec 13 07:26:47 compute-0 polkitd[43388]: Unregistered Authentication Agent for unix-process:205916:268078 (system bus name :1.2526, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Dec 13 07:26:47 compute-0 polkitd[43388]: Registered Authentication Agent for unix-process:205915:268077 (system bus name :1.2527 [pkttyagent --process 205915 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Dec 13 07:26:47 compute-0 polkitd[43388]: Unregistered Authentication Agent for unix-process:205915:268077 (system bus name :1.2527, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Dec 13 07:26:48 compute-0 polkitd[43388]: Registered Authentication Agent for unix-process:206382:268229 (system bus name :1.2530 [pkttyagent --process 206382 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Dec 13 07:26:48 compute-0 polkitd[43388]: Unregistered Authentication Agent for unix-process:206382:268229 (system bus name :1.2530, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)

○ raid-check.service - RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.service; static)
     Active: inactive (dead)
TriggeredBy: ○ raid-check.timer

○ rc-local.service - /etc/rc.d/rc.local Compatibility
     Loaded: loaded (/usr/lib/systemd/system/rc-local.service; static)
     Active: inactive (dead)
       Docs: man:systemd-rc-local-generator(8)

○ rescue.service - Rescue Shell
     Loaded: loaded (/usr/lib/systemd/system/rescue.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

○ rpc-gssd.service - RPC security service for NFS client and server
     Loaded: loaded (/usr/lib/systemd/system/rpc-gssd.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-12-13 06:42:11 UTC; 54min ago
       Docs: man:rpc.gssd(8)

Dec 13 06:42:11 np0005558317 systemd[1]: RPC security service for NFS client and server was skipped because of an unmet condition check (ConditionPathExists=/etc/krb5.keytab).

Unit rpc-svcgssd.service could not be found.
● rpc-statd-notify.service - Notify NFS peers of a restart
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd-notify.service; static)
     Active: active (exited) since Sat 2025-12-13 06:42:16 UTC; 54min ago
       Docs: man:sm-notify(8)
             man:rpc.statd(8)
        CPU: 6ms

Dec 13 06:42:16 np0005558317 systemd[1]: Starting Notify NFS peers of a restart...
Dec 13 06:42:16 np0005558317 sm-notify[961]: Version 2.5.4 starting
Dec 13 06:42:16 np0005558317 systemd[1]: Started Notify NFS peers of a restart.

○ rpc-statd.service - NFS status monitor for NFSv2/3 locking.
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.statd(8)

● rpcbind.service - RPC Bind
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-12-13 06:42:10 UTC; 54min ago
TriggeredBy: ● rpcbind.socket
       Docs: man:rpcbind(8)
   Main PID: 671 (rpcbind)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48568)
     Memory: 2.5M (peak: 3.0M)
        CPU: 21ms
     CGroup: /system.slice/rpcbind.service
             └─671 /usr/bin/rpcbind -w -f

Dec 13 06:42:10 localhost systemd[1]: Starting RPC Bind...
Dec 13 06:42:10 localhost systemd[1]: Started RPC Bind.

● rsyslog.service - System Logging Service
     Loaded: loaded (/usr/lib/systemd/system/rsyslog.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-12-13 06:42:16 UTC; 54min ago
       Docs: man:rsyslogd(8)
             https://www.rsyslog.com/doc/
   Main PID: 962 (rsyslogd)
         IO: 4.0K read, 12.2M written
      Tasks: 3 (limit: 48568)
     Memory: 13.1M (peak: 15.0M)
        CPU: 4.764s
     CGroup: /system.slice/rsyslog.service
             └─962 /usr/sbin/rsyslogd -n

Dec 13 07:12:37 compute-0 rsyslogd[962]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Dec 13 07:15:00 compute-0 rsyslogd[962]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Dec 13 07:15:02 compute-0 rsyslogd[962]: message too long (8842) with configured size 8096, begin of message is: [{"container_id": "8e6a4f61ea03", "container_image_digests": ["quay.io/ceph/ceph [v8.2510.0-2.el9 try https://www.rsyslog.com/e/2445 ]
Dec 13 07:24:09 compute-0 rsyslogd[962]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Dec 13 07:24:09 compute-0 rsyslogd[962]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Dec 13 07:28:44 compute-0 rsyslogd[962]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Dec 13 07:29:14 compute-0 rsyslogd[962]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Dec 13 07:36:25 compute-0 rsyslogd[962]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Dec 13 07:36:29 compute-0 rsyslogd[962]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Dec 13 07:36:29 compute-0 rsyslogd[962]: imjournal from <np0005558317:ceph-osd>: begin to drop messages due to rate-limiting

○ selinux-autorelabel-mark.service - Mark the need to relabel after reboot
     Loaded: loaded (/usr/lib/systemd/system/selinux-autorelabel-mark.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-12-13 06:42:10 UTC; 54min ago

Dec 13 06:42:10 localhost systemd[1]: Mark the need to relabel after reboot was skipped because of an unmet condition check (ConditionSecurity=!selinux).

● serial-getty@ttyS0.service - Serial Getty on ttyS0
     Loaded: loaded (/usr/lib/systemd/system/serial-getty@.service; enabled-runtime; preset: disabled)
     Active: active (running) since Sat 2025-12-13 06:42:16 UTC; 54min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
Unit sntp.service could not be found.
Unit sshd-keygen.service could not be found.
   Main PID: 984 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48568)
     Memory: 296.0K (peak: 552.0K)
        CPU: 6ms
     CGroup: /system.slice/system-serial\x2dgetty.slice/serial-getty@ttyS0.service
             └─984 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

Dec 13 06:42:16 np0005558317 systemd[1]: Started Serial Getty on ttyS0.

○ sshd-keygen@ecdsa.service - OpenSSH ecdsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-12-13 07:25:43 UTC; 11min ago

Dec 13 06:42:10 localhost systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Dec 13 07:25:43 compute-0 systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@ed25519.service - OpenSSH ed25519 Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-12-13 07:25:43 UTC; 11min ago

Dec 13 06:42:10 localhost systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Dec 13 07:25:43 compute-0 systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@rsa.service - OpenSSH rsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-12-13 07:25:43 UTC; 11min ago

Dec 13 06:42:10 localhost systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Dec 13 07:25:43 compute-0 systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

● sshd.service - OpenSSH server daemon
     Loaded: loaded (/usr/lib/systemd/system/sshd.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-12-13 07:25:43 UTC; 11min ago
       Docs: man:sshd(8)
             man:sshd_config(5)
   Main PID: 179217 (sshd)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48568)
     Memory: 3.4M (peak: 7.1M)
        CPU: 90ms
     CGroup: /system.slice/sshd.service
             └─179217 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"

Dec 13 07:25:43 compute-0 sshd[179217]: Server listening on 0.0.0.0 port 22.
Dec 13 07:25:43 compute-0 sshd[179217]: Server listening on :: port 22.
Dec 13 07:25:43 compute-0 systemd[1]: Started OpenSSH server daemon.
Dec 13 07:27:12 compute-0 sshd-session[212092]: Accepted publickey for zuul from 192.168.122.30 port 52180 ssh2: ECDSA SHA256:7hwsPrzEGvjNfXCD1S+7z6QhqAHn2HFxvvV5rKQhgY8
Dec 13 07:27:12 compute-0 sshd-session[212092]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Dec 13 07:28:45 compute-0 sshd-session[234740]: Accepted publickey for zuul from 192.168.122.30 port 55228 ssh2: ECDSA SHA256:7hwsPrzEGvjNfXCD1S+7z6QhqAHn2HFxvvV5rKQhgY8
Unit syslog.service could not be found.
Dec 13 07:28:45 compute-0 sshd-session[234740]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Dec 13 07:28:45 compute-0 sshd-session[234740]: pam_unix(sshd:session): session closed for user zuul
Dec 13 07:36:08 compute-0 sshd-session[246077]: Accepted publickey for zuul from 192.168.122.10 port 33054 ssh2: ECDSA SHA256:7hwsPrzEGvjNfXCD1S+7z6QhqAHn2HFxvvV5rKQhgY8
Dec 13 07:36:09 compute-0 sshd-session[246077]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)

○ sssd-kcm.service - SSSD Kerberos Cache Manager
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.service; indirect; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ● sssd-kcm.socket
       Docs: man:sssd-kcm(5)

○ sssd.service - System Security Services Daemon
     Loaded: loaded (/usr/lib/systemd/system/sssd.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-12-13 06:42:10 UTC; 54min ago

Dec 13 06:42:10 localhost systemd[1]: System Security Services Daemon was skipped because no trigger condition checks were met.

○ sysstat-collect.service - system activity accounting tool
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-collect.timer
       Docs: man:sa1(8)

○ sysstat-summary.service - Generate a daily summary of process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-summary.timer
       Docs: man:sa2(8)

○ sysstat.service - Resets System Activity Logs
     Loaded: loaded (/usr/lib/systemd/system/sysstat.service; enabled; preset: enabled)
     Active: inactive (dead)

○ systemd-ask-password-console.service - Dispatch Password Requests to Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-console.path
       Docs: man:systemd-ask-password-console.service(8)

○ systemd-ask-password-wall.service - Forward Password Requests to Wall
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-wall.path
       Docs: man:systemd-ask-password-wall.service(8)

○ systemd-binfmt.service - Set Up Additional Binary Formats
     Loaded: loaded (/usr/lib/systemd/system/systemd-binfmt.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd-binfmt.service(8)
             man:binfmt.d(5)
             https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Dec 13 06:42:10 localhost systemd[1]: Set Up Additional Binary Formats was skipped because no trigger condition checks were met.

○ systemd-boot-random-seed.service - Update Boot Loader Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-random-seed.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd-boot-random-seed.service(8)
             man:random(4)

Dec 13 06:42:10 localhost systemd[1]: Update Boot Loader Random Seed was skipped because no trigger condition checks were met.

● systemd-boot-update.service - Automatic Boot Loader Update
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-update.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:bootctl(1)
   Main PID: 667 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Dec 13 06:42:10 localhost systemd[1]: Starting Automatic Boot Loader Update...
Dec 13 06:42:10 localhost bootctl[667]: Couldn't find EFI system partition, skipping.
Dec 13 06:42:10 localhost systemd[1]: Finished Automatic Boot Loader Update.

○ systemd-firstboot.service - First Boot Wizard
     Loaded: loaded (/usr/lib/systemd/system/systemd-firstboot.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd-firstboot(1)

Dec 13 06:42:10 localhost systemd[1]: First Boot Wizard was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ systemd-fsck-root.service - File System Check on Root Device
     Loaded: loaded (/usr/lib/systemd/system/systemd-fsck-root.service; static)
     Active: inactive (dead) since Sat 2025-12-13 06:42:10 UTC; 54min ago
   Duration: 1.174s
       Docs: man:systemd-fsck-root.service(8)
   Main PID: 522 (code=exited, status=0/SUCCESS)
        CPU: 11ms

Dec 13 06:42:09 localhost systemd[1]: Starting File System Check on /dev/disk/by-uuid/cbdedf45-ed1d-4952-82a8-33a12c0ba266...
Dec 13 06:42:09 localhost systemd-fsck[524]: /usr/sbin/fsck.xfs: XFS file system.
Dec 13 06:42:09 localhost systemd[1]: Finished File System Check on /dev/disk/by-uuid/cbdedf45-ed1d-4952-82a8-33a12c0ba266.

● systemd-hostnamed.service - Hostname Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-hostnamed.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-hostnamed.service.d
             └─disable-privatedevices.conf
     Active: active (running) since Sat 2025-12-13 07:36:36 UTC; 14s ago
       Docs: man:systemd-hostnamed.service(8)
             man:hostname(5)
             man:machine-info(5)
             man:org.freedesktop.resolve1(5)
   Main PID: 249805 (systemd-hostnam)
         IO: 8.0K read, 0B written
      Tasks: 1 (limit: 48568)
     Memory: 2.7M (peak: 3.7M)
        CPU: 83ms
     CGroup: /system.slice/systemd-hostnamed.service
             └─249805 /usr/lib/systemd/systemd-hostnamed

Dec 13 07:36:36 compute-0 systemd[1]: Starting Hostname Service...
Dec 13 07:36:36 compute-0 systemd[1]: Started Hostname Service.

● systemd-hwdb-update.service - Rebuild Hardware Database
     Loaded: loaded (/usr/lib/systemd/system/systemd-hwdb-update.service; static)
     Active: active (exited) since Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:hwdb(7)
             man:systemd-hwdb(8)
   Main PID: 659 (code=exited, status=0/SUCCESS)
        CPU: 389ms

Dec 13 06:42:10 localhost systemd[1]: Starting Rebuild Hardware Database...
Dec 13 06:42:10 localhost systemd[1]: Finished Rebuild Hardware Database.

○ systemd-initctl.service - initctl Compatibility Daemon
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-initctl.socket
       Docs: man:systemd-initctl.service(8)

● systemd-journal-catalog-update.service - Rebuild Journal Catalog
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-catalog-update.service; static)
     Active: active (exited) since Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 672 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Dec 13 06:42:10 localhost systemd[1]: Starting Rebuild Journal Catalog...
Dec 13 06:42:10 localhost systemd[1]: Finished Rebuild Journal Catalog.

● systemd-journal-flush.service - Flush Journal to Persistent Storage
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-flush.service; static)
     Active: active (exited) since Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 660 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Dec 13 06:42:10 localhost systemd[1]: Starting Flush Journal to Persistent Storage...
Dec 13 06:42:10 localhost systemd[1]: Finished Flush Journal to Persistent Storage.

● systemd-journald.service - Journal Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.service; static)
     Active: active (running) since Sat 2025-12-13 06:42:10 UTC; 54min ago
TriggeredBy: ● systemd-journald.socket
             ● systemd-journald-dev-log.socket
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 650 (systemd-journal)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48568)
     Memory: 63.7M (peak: 64.2M)
        CPU: 4.949s
     CGroup: /system.slice/systemd-journald.service
             └─650 /usr/lib/systemd/systemd-journald

Dec 13 06:42:10 localhost systemd-journald[650]: Journal started
Dec 13 06:42:10 localhost systemd-journald[650]: Runtime Journal (/run/log/journal/64f1d6692049d8be5e8b216cc203502c) is 8.0M, max 153.6M, 145.6M free.
Dec 13 06:42:10 localhost systemd[1]: systemd-journald.service: Deactivated successfully.
Dec 13 06:42:10 localhost systemd-journald[650]: Runtime Journal (/run/log/journal/64f1d6692049d8be5e8b216cc203502c) is 8.0M, max 153.6M, 145.6M free.
Dec 13 06:42:10 localhost systemd-journald[650]: Received client request to flush runtime journal.

● systemd-logind.service - User Login Management
     Loaded: loaded (/usr/lib/systemd/system/systemd-logind.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-logind.service.d
             └─10-grub2-logind-service.conf
     Active: active (running) since Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:sd-login(3)
             man:systemd-logind.service(8)
             man:logind.conf(5)
             man:org.freedesktop.login1(5)
   Main PID: 745 (systemd-logind)
     Status: "Processing requests..."
         IO: 4.0K read, 0B written
      Tasks: 1 (limit: 48568)
     Memory: 6.3M (peak: 6.8M)
        CPU: 2.044s
     CGroup: /system.slice/systemd-logind.service
             └─745 /usr/lib/systemd/systemd-logind

Dec 13 07:27:07 compute-0 systemd-logind[745]: Removed session 51.
Dec 13 07:27:12 compute-0 systemd-logind[745]: New session 52 of user zuul.
Dec 13 07:28:11 compute-0 systemd-logind[745]: Watching system buttons on /dev/input/event0 (Power Button)
Dec 13 07:28:11 compute-0 systemd-logind[745]: Watching system buttons on /dev/input/event1 (AT Translated Set 2 keyboard)
Dec 13 07:28:45 compute-0 systemd-logind[745]: New session 53 of user zuul.
Dec 13 07:28:45 compute-0 systemd-logind[745]: Session 53 logged out. Waiting for processes to exit.
Dec 13 07:28:45 compute-0 systemd-logind[745]: Removed session 53.
Dec 13 07:29:18 compute-0 systemd-logind[745]: Session 52 logged out. Waiting for processes to exit.
Dec 13 07:29:18 compute-0 systemd-logind[745]: Removed session 52.
Dec 13 07:36:09 compute-0 systemd-logind[745]: New session 54 of user zuul.

○ systemd-machine-id-commit.service - Commit a transient machine-id on disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-machine-id-commit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd-machine-id-commit.service(8)

Dec 13 06:42:10 localhost systemd[1]: Commit a transient machine-id on disk was skipped because of an unmet condition check (ConditionPathIsMountPoint=/etc/machine-id).

● systemd-machined.service - Virtual Machine and Container Registration Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-machined.service; static)
     Active: active (running) since Sat 2025-12-13 07:26:42 UTC; 10min ago
       Docs: man:systemd-machined.service(8)
             man:org.freedesktop.machine1(5)
   Main PID: 204630 (systemd-machine)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48568)
     Memory: 1.4M (peak: 1.7M)
        CPU: 454ms
     CGroup: /system.slice/systemd-machined.service
             └─204630 /usr/lib/systemd/systemd-machined

Dec 13 07:26:42 compute-0 systemd[1]: Starting Virtual Machine and Container Registration Service...
Dec 13 07:26:42 compute-0 systemd[1]: Started Virtual Machine and Container Registration Service.

● systemd-modules-load.service - Load Kernel Modules
     Loaded: loaded (/usr/lib/systemd/system/systemd-modules-load.service; static)
     Active: active (exited) since Sat 2025-12-13 07:28:08 UTC; 8min ago
       Docs: man:systemd-modules-load.service(8)
             man:modules-load.d(5)
Unit systemd-networkd-wait-online.service could not be found.
   Main PID: 224873 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Dec 13 07:28:08 compute-0 systemd[1]: Starting Load Kernel Modules...
Dec 13 07:28:08 compute-0 systemd[1]: Finished Load Kernel Modules.

● systemd-network-generator.service - Generate network units from Kernel command line
     Loaded: loaded (/usr/lib/systemd/system/systemd-network-generator.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd-network-generator.service(8)
   Main PID: 651 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Dec 13 06:42:10 localhost systemd[1]: Finished Generate network units from Kernel command line.

○ systemd-pcrmachine.service - TPM2 PCR Machine ID Measurement
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrmachine.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd-pcrmachine.service(8)

○ systemd-pcrphase-initrd.service - TPM2 PCR Barrier (initrd)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-initrd.service; static)
     Active: inactive (dead)
       Docs: man:systemd-pcrphase-initrd.service(8)

○ systemd-pcrphase-sysinit.service - TPM2 PCR Barrier (Initialization)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-sysinit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd-pcrphase-sysinit.service(8)

Dec 13 06:42:10 localhost systemd[1]: TPM2 PCR Barrier (Initialization) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pcrphase.service - TPM2 PCR Barrier (User)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-12-13 06:42:11 UTC; 54min ago
       Docs: man:systemd-pcrphase.service(8)

Dec 13 06:42:11 np0005558317 systemd[1]: TPM2 PCR Barrier (User) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pstore.service - Platform Persistent Storage Archival
     Loaded: loaded (/usr/lib/systemd/system/systemd-pstore.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd-pstore(8)

Dec 13 06:42:10 localhost systemd[1]: Platform Persistent Storage Archival was skipped because of an unmet condition check (ConditionDirectoryNotEmpty=/sys/fs/pstore).

● systemd-random-seed.service - Load/Save OS Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-random-seed.service; static)
     Active: active (exited) since Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd-random-seed.service(8)
             man:random(4)
   Main PID: 661 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Dec 13 06:42:10 localhost systemd[1]: Starting Load/Save OS Random Seed...
Dec 13 06:42:10 localhost systemd[1]: Finished Load/Save OS Random Seed.

● systemd-remount-fs.service - Remount Root and Kernel File Systems
     Loaded: loaded (/usr/lib/systemd/system/systemd-remount-fs.service; enabled-runtime; preset: disabled)
     Active: active (exited) since Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd-remount-fs.service(8)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
   Main PID: 652 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Dec 13 06:42:10 localhost systemd[1]: Finished Remount Root and Kernel File Systems.

○ systemd-repart.service - Repartition Root Disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-repart.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd-repart.service(8)

Unit systemd-timesyncd.service could not be found.
Unit systemd-tmpfiles.service could not be found.
○ systemd-rfkill.service - Load/Save RF Kill Switch Status
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-rfkill.socket
       Docs: man:systemd-rfkill.service(8)

● systemd-sysctl.service - Apply Kernel Variables
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysctl.service; static)
     Active: active (exited) since Sat 2025-12-13 07:09:19 UTC; 27min ago
       Docs: man:systemd-sysctl.service(8)
             man:sysctl.d(5)
   Main PID: 44871 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Dec 13 07:09:19 compute-0 systemd[1]: Starting Apply Kernel Variables...
Dec 13 07:09:19 compute-0 systemd[1]: Finished Apply Kernel Variables.

○ systemd-sysext.service - Merge System Extension Images into /usr/ and /opt/
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysext.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd-sysext.service(8)

● systemd-sysusers.service - Create System Users
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysusers.service; static)
     Active: active (exited) since Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:sysusers.d(5)
             man:systemd-sysusers.service(8)
   Main PID: 662 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Dec 13 06:42:10 localhost systemd[1]: Starting Create System Users...
Dec 13 06:42:10 localhost systemd[1]: Finished Create System Users.

○ systemd-tmpfiles-clean.service - Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.service; static)
     Active: inactive (dead) since Sat 2025-12-13 06:57:37 UTC; 39min ago
TriggeredBy: ● systemd-tmpfiles-clean.timer
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 30900 (code=exited, status=0/SUCCESS)
        CPU: 36ms

Dec 13 06:57:37 compute-0 systemd[1]: Starting Cleanup of Temporary Directories...
Dec 13 06:57:37 compute-0 systemd[1]: systemd-tmpfiles-clean.service: Deactivated successfully.
Dec 13 06:57:37 compute-0 systemd[1]: Finished Cleanup of Temporary Directories.

● systemd-tmpfiles-setup-dev.service - Create Static Device Nodes in /dev
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup-dev.service; static)
     Active: active (exited) since Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 664 (code=exited, status=0/SUCCESS)
        CPU: 29ms

Dec 13 06:42:10 localhost systemd[1]: Starting Create Static Device Nodes in /dev...
Dec 13 06:42:10 localhost systemd[1]: Finished Create Static Device Nodes in /dev.

● systemd-tmpfiles-setup.service - Create Volatile Files and Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup.service; static)
     Active: active (exited) since Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 668 (code=exited, status=0/SUCCESS)
        CPU: 73ms

Dec 13 06:42:10 localhost systemd[1]: Starting Create Volatile Files and Directories...
Dec 13 06:42:10 localhost systemd[1]: Finished Create Volatile Files and Directories.

○ systemd-udev-settle.service - Wait for udev To Complete Device Initialization
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-settle.service; static)
     Active: inactive (dead)
       Docs: man:systemd-udev-settle.service(8)

● systemd-udev-trigger.service - Coldplug All udev Devices
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-trigger.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-udev-trigger.service.d
             └─systemd-udev-trigger-no-reload.conf
     Active: active (exited) since Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:udev(7)
             man:systemd-udevd.service(8)
   Main PID: 654 (code=exited, status=0/SUCCESS)
        CPU: 96ms

Dec 13 06:42:10 localhost systemd[1]: Finished Coldplug All udev Devices.

● systemd-udevd.service - Rule-based Manager for Device Events and Files
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd.service; static)
     Active: active (running) since Sat 2025-12-13 06:42:10 UTC; 54min ago
TriggeredBy: ● systemd-udevd-kernel.socket
             ● systemd-udevd-control.socket
       Docs: man:systemd-udevd.service(8)
             man:udev(7)
   Main PID: 701 (systemd-udevd)
     Status: "Processing with 24 children at max"
         IO: 175.1M read, 99.8M written
      Tasks: 1
     Memory: 52.8M (peak: 88.8M)
        CPU: 6.213s
     CGroup: /system.slice/systemd-udevd.service
             └─udev
               └─701 /usr/lib/systemd/systemd-udevd

Dec 13 07:36:20 compute-0 lvm[246726]: PV /dev/loop5 online, VG ceph_vg2 is complete.
Dec 13 07:36:20 compute-0 lvm[246726]: VG ceph_vg2 finished
Dec 13 07:36:20 compute-0 lvm[246756]: PV /dev/loop4 online, VG ceph_vg1 is complete.
Dec 13 07:36:20 compute-0 lvm[246756]: VG ceph_vg1 finished
Dec 13 07:36:37 compute-0 lvm[250082]: PV /dev/loop5 online, VG ceph_vg2 is complete.
Dec 13 07:36:37 compute-0 lvm[250082]: VG ceph_vg2 finished
Dec 13 07:36:37 compute-0 lvm[250077]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Dec 13 07:36:37 compute-0 lvm[250077]: VG ceph_vg0 finished
Dec 13 07:36:37 compute-0 lvm[250081]: PV /dev/loop4 online, VG ceph_vg1 is complete.
Dec 13 07:36:37 compute-0 lvm[250081]: VG ceph_vg1 finished

● systemd-update-done.service - Update is Completed
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-done.service; static)
     Active: active (exited) since Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd-update-done.service(8)
   Main PID: 702 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Dec 13 06:42:10 localhost systemd[1]: Starting Update is Completed...
Dec 13 06:42:10 localhost systemd[1]: Finished Update is Completed.

○ systemd-update-utmp-runlevel.service - Record Runlevel Change in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp-runlevel.service; static)
     Active: inactive (dead) since Sat 2025-12-13 06:42:17 UTC; 54min ago
       Docs: man:systemd-update-utmp-runlevel.service(8)
             man:utmp(5)
   Main PID: 989 (code=exited, status=0/SUCCESS)
        CPU: 6ms

Dec 13 06:42:16 np0005558317 systemd[1]: Starting Record Runlevel Change in UTMP...
Dec 13 06:42:17 np0005558317 systemd[1]: systemd-update-utmp-runlevel.service: Deactivated successfully.
Dec 13 06:42:17 np0005558317 systemd[1]: Finished Record Runlevel Change in UTMP.

● systemd-update-utmp.service - Record System Boot/Shutdown in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp.service; static)
     Active: active (exited) since Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd-update-utmp.service(8)
             man:utmp(5)
   Main PID: 700 (code=exited, status=0/SUCCESS)
        CPU: 6ms

Dec 13 06:42:10 localhost systemd[1]: Starting Record System Boot/Shutdown in UTMP...
Dec 13 06:42:10 localhost systemd[1]: Finished Record System Boot/Shutdown in UTMP.

● systemd-user-sessions.service - Permit User Sessions
     Loaded: loaded (/usr/lib/systemd/system/systemd-user-sessions.service; static)
     Active: active (exited) since Sat 2025-12-13 06:42:16 UTC; 54min ago
       Docs: man:systemd-user-sessions.service(8)
   Main PID: 964 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Dec 13 06:42:16 np0005558317 systemd[1]: Starting Permit User Sessions...
Dec 13 06:42:16 np0005558317 systemd[1]: Finished Permit User Sessions.

○ systemd-vconsole-setup.service - Setup Virtual Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-vconsole-setup.service; static)
     Active: inactive (dead) since Sat 2025-12-13 06:42:09 UTC; 54min ago
   Duration: 1.518s
       Docs: man:systemd-vconsole-setup.service(8)
             man:vconsole.conf(5)
   Main PID: 286 (code=exited, status=0/SUCCESS)
        CPU: 170ms

Dec 13 06:42:08 localhost systemd[1]: Finished Setup Virtual Console.
Dec 13 06:42:09 localhost systemd[1]: systemd-vconsole-setup.service: Deactivated successfully.
Dec 13 06:42:09 localhost systemd[1]: Stopped Setup Virtual Console.
Unit tlp.service could not be found.
Notice: journal has been rotated since unit was started, output may be incomplete.

● tuned.service - Dynamic System Tuning Daemon
     Loaded: loaded (/usr/lib/systemd/system/tuned.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-12-13 07:18:59 UTC; 17min ago
       Docs: man:tuned(8)
             man:tuned.conf(5)
             man:tuned-adm(8)
   Main PID: 105788 (tuned)
         IO: 0B read, 0B written
      Tasks: 4 (limit: 48568)
     Memory: 13.7M (peak: 13.9M)
        CPU: 316ms
     CGroup: /system.slice/tuned.service
             └─105788 /usr/bin/python3 -Es /usr/sbin/tuned -l -P

Dec 13 07:18:59 compute-0 systemd[1]: Starting Dynamic System Tuning Daemon...
Dec 13 07:18:59 compute-0 systemd[1]: Started Dynamic System Tuning Daemon.

○ unbound-anchor.service - update of the root trust anchor for DNSSEC validation in unbound
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.service; static)
     Active: inactive (dead)
TriggeredBy: ● unbound-anchor.timer
       Docs: man:unbound-anchor(8)

● user-runtime-dir@1000.service - User Runtime Directory /run/user/1000
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Sat 2025-12-13 06:42:43 UTC; 54min ago
       Docs: man:user@.service(5)
   Main PID: 4372 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Dec 13 06:42:43 np0005558317 systemd[1]: Starting User Runtime Directory /run/user/1000...
Dec 13 06:42:43 np0005558317 systemd[1]: Finished User Runtime Directory /run/user/1000.

● user-runtime-dir@42477.service - User Runtime Directory /run/user/42477
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Sat 2025-12-13 07:13:43 UTC; 23min ago
       Docs: man:user@.service(5)
   Main PID: 76209 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Dec 13 07:13:43 compute-0 systemd[1]: Starting User Runtime Directory /run/user/42477...
Dec 13 07:13:43 compute-0 systemd[1]: Finished User Runtime Directory /run/user/42477.

● user@1000.service - User Manager for UID 1000
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Sat 2025-12-13 06:42:43 UTC; 54min ago
       Docs: man:user@.service(5)
   Main PID: 4373 (systemd)
     Status: "Ready."
         IO: 676.0K read, 8.0K written
      Tasks: 5
     Memory: 7.3M (peak: 14.9M)
        CPU: 1.705s
     CGroup: /user.slice/user-1000.slice/user@1000.service
             ├─app.slice
             │ └─dbus-broker.service
             │   ├─14044 /usr/bin/dbus-broker-launch --scope user
             │   └─14045 dbus-broker --log 4 --controller 9 --machine-id 64f1d6692049d8be5e8b216cc203502c --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             ├─init.scope
             │ ├─4373 /usr/lib/systemd/systemd --user
             │ └─4375 "(sd-pam)"
             └─user.slice
               └─podman-pause-1c7804ac.scope
                 └─14027 catatonit -P

Dec 13 06:51:16 np0005558317 dbus-broker-launch[14044]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +31: Eavesdropping is deprecated and ignored
Dec 13 06:51:16 np0005558317 dbus-broker-launch[14044]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +33: Eavesdropping is deprecated and ignored
Dec 13 06:51:16 np0005558317 systemd[4373]: Started D-Bus User Message Bus.
Dec 13 06:51:16 np0005558317 dbus-broker-lau[14044]: Ready
Dec 13 06:51:16 np0005558317 systemd[4373]: selinux: avc:  op=load_policy lsm=selinux seqno=6 res=1
Dec 13 06:51:16 np0005558317 systemd[4373]: Created slice Slice /user.
Dec 13 06:51:16 np0005558317 systemd[4373]: podman-14022.scope: unit configures an IP firewall, but not running as root.
Dec 13 06:51:16 np0005558317 systemd[4373]: (This warning is only shown for the first unit using IP firewalling.)
Dec 13 06:51:16 np0005558317 systemd[4373]: Started podman-14022.scope.
Dec 13 06:51:17 np0005558317 systemd[4373]: Started podman-pause-1c7804ac.scope.

● user@42477.service - User Manager for UID 42477
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Sat 2025-12-13 07:13:43 UTC; 23min ago
       Docs: man:user@.service(5)
   Main PID: 76210 (systemd)
     Status: "Ready."
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 7.0M (peak: 10.5M)
        CPU: 1.100s
     CGroup: /user.slice/user-42477.slice/user@42477.service
             └─init.scope
               ├─76210 /usr/lib/systemd/systemd --user
               └─76212 "(sd-pam)"

Dec 13 07:13:43 compute-0 systemd[76210]: Reached target Sockets.
Dec 13 07:13:43 compute-0 systemd[76210]: Reached target Basic System.
Dec 13 07:13:43 compute-0 systemd[76210]: Reached target Main User Target.
Dec 13 07:13:43 compute-0 systemd[76210]: Startup finished in 87ms.
Dec 13 07:13:43 compute-0 systemd[1]: Started User Manager for UID 42477.
Dec 13 07:15:43 compute-0 systemd[76210]: Starting Mark boot as successful...
Dec 13 07:15:43 compute-0 systemd[76210]: Finished Mark boot as successful.
Dec 13 07:19:18 compute-0 systemd[76210]: Created slice User Background Tasks Slice.
Dec 13 07:19:18 compute-0 systemd[76210]: Starting Cleanup of User's Temporary Files and Directories...
Dec 13 07:19:18 compute-0 systemd[76210]: Finished Cleanup of User's Temporary Files and Directories.

○ virtinterfaced.service - libvirt interface daemon
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtinterfaced-admin.socket
             ○ virtinterfaced.socket
             ○ virtinterfaced-ro.socket
       Docs: man:virtinterfaced(8)
             https://libvirt.org/

○ virtlockd.service - libvirt locking daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtlockd-admin.socket
             ● virtlockd.socket
       Docs: man:virtlockd(8)
             https://libvirt.org/

● virtlogd.service - libvirt logging daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-12-13 07:26:40 UTC; 10min ago
TriggeredBy: ● virtlogd-admin.socket
             ● virtlogd.socket
       Docs: man:virtlogd(8)
             https://libvirt.org/
   Main PID: 204000 (virtlogd)
         IO: 648.0K read, 0B written
      Tasks: 1 (limit: 48568)
     Memory: 3.1M (peak: 3.3M)
        CPU: 61ms
     CGroup: /system.slice/virtlogd.service
             └─204000 /usr/sbin/virtlogd

Dec 13 07:26:40 compute-0 systemd[1]: Starting libvirt logging daemon...
Dec 13 07:26:40 compute-0 systemd[1]: Started libvirt logging daemon.

○ virtnetworkd.service - libvirt network daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnetworkd-admin.socket
             ○ virtnetworkd.socket
             ○ virtnetworkd-ro.socket
       Docs: man:virtnetworkd(8)
             https://libvirt.org/

● virtnodedevd.service - libvirt nodedev daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-12-13 07:29:11 UTC; 7min ago
TriggeredBy: ● virtnodedevd.socket
             ● virtnodedevd-admin.socket
             ● virtnodedevd-ro.socket
       Docs: man:virtnodedevd(8)
             https://libvirt.org/
   Main PID: 240159 (virtnodedevd)
         IO: 0B read, 0B written
      Tasks: 20 (limit: 48568)
     Memory: 5.9M (peak: 6.9M)
        CPU: 732ms
     CGroup: /system.slice/virtnodedevd.service
             └─240159 /usr/sbin/virtnodedevd --timeout 120

Unit ypbind.service could not be found.
Unit yppasswdd.service could not be found.
Unit ypserv.service could not be found.
Unit ypxfrd.service could not be found.
Dec 13 07:29:11 compute-0 systemd[1]: Starting libvirt nodedev daemon...
Dec 13 07:29:11 compute-0 systemd[1]: Started libvirt nodedev daemon.

○ virtnwfilterd.service - libvirt nwfilter daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnwfilterd.socket
             ○ virtnwfilterd-admin.socket
             ○ virtnwfilterd-ro.socket
       Docs: man:virtnwfilterd(8)
             https://libvirt.org/

○ virtproxyd.service - libvirt proxy daemon
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.service; enabled; preset: disabled)
     Active: inactive (dead) since Sat 2025-12-13 07:28:41 UTC; 8min ago
   Duration: 2min 15ms
TriggeredBy: ● virtproxyd-ro.socket
             ● virtproxyd-tls.socket
             ● virtproxyd.socket
             ● virtproxyd-admin.socket
       Docs: man:virtproxyd(8)
             https://libvirt.org/
   Main PID: 204419 (code=exited, status=0/SUCCESS)
        CPU: 36ms

Dec 13 07:26:41 compute-0 systemd[1]: Starting libvirt proxy daemon...
Dec 13 07:26:41 compute-0 systemd[1]: Started libvirt proxy daemon.
Dec 13 07:28:41 compute-0 systemd[1]: virtproxyd.service: Deactivated successfully.

● virtqemud.service - libvirt QEMU daemon
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-12-13 07:29:14 UTC; 7min ago
TriggeredBy: ● virtqemud.socket
             ● virtqemud-ro.socket
             ● virtqemud-admin.socket
       Docs: man:virtqemud(8)
             https://libvirt.org/
   Main PID: 241006 (virtqemud)
         IO: 4.6M read, 176.0K written
      Tasks: 18 (limit: 32768)
     Memory: 19.5M (peak: 46.6M)
        CPU: 844ms
     CGroup: /system.slice/virtqemud.service
             └─241006 /usr/sbin/virtqemud --timeout 120

Dec 13 07:29:14 compute-0 systemd[1]: Starting libvirt QEMU daemon...
Dec 13 07:29:14 compute-0 systemd[1]: Started libvirt QEMU daemon.
Dec 13 07:29:15 compute-0 virtqemud[241006]: libvirt version: 11.9.0, package: 1.el9 (builder@centos.org, 2025-11-04-09:54:50, )
Dec 13 07:29:15 compute-0 virtqemud[241006]: hostname: compute-0
Dec 13 07:29:15 compute-0 virtqemud[241006]: End of file while reading data: Input/output error
Dec 13 07:36:19 compute-0 virtqemud[241006]: Failed to connect socket to '/var/run/libvirt/virtnetworkd-sock-ro': No such file or directory
Dec 13 07:36:19 compute-0 virtqemud[241006]: Failed to connect socket to '/var/run/libvirt/virtnwfilterd-sock-ro': No such file or directory
Dec 13 07:36:19 compute-0 virtqemud[241006]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory

○ virtsecretd.service - libvirt secret daemon
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.service; enabled; preset: disabled)
     Active: inactive (dead) since Sat 2025-12-13 07:28:48 UTC; 8min ago
   Duration: 2min 5.027s
TriggeredBy: ● virtsecretd-admin.socket
             ● virtsecretd.socket
             ● virtsecretd-ro.socket
       Docs: man:virtsecretd(8)
             https://libvirt.org/
   Main PID: 204863 (code=exited, status=0/SUCCESS)
        CPU: 37ms

Dec 13 07:26:43 compute-0 systemd[1]: Starting libvirt secret daemon...
Dec 13 07:26:43 compute-0 systemd[1]: Started libvirt secret daemon.
Dec 13 07:28:48 compute-0 systemd[1]: virtsecretd.service: Deactivated successfully.

○ virtstoraged.service - libvirt storage daemon
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtstoraged-admin.socket
             ○ virtstoraged.socket
             ○ virtstoraged-ro.socket
       Docs: man:virtstoraged(8)
             https://libvirt.org/

● -.slice - Root Slice
     Loaded: loaded
     Active: active since Sat 2025-12-13 06:42:07 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:07 UTC; 54min ago
       Docs: man:systemd.special(7)
      Tasks: 1374
     Memory: 2.7G
        CPU: 22min 44.960s
     CGroup: /
             ├─252669 turbostat --debug sleep 10
             ├─252673 sleep 10
             ├─init.scope
             │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
             ├─machine.slice
             │ ├─libpod-1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07.scope
             │ │ └─container
             │ │   ├─154118 dumb-init --single-child -- kolla_start
             │ │   ├─154121 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │ │   ├─154224 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │ │   └─154229 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpj55srbhp/privsep.sock
             │ ├─libpod-85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306.scope
             │ │ └─container
             │ │   ├─241224 dumb-init --single-child -- kolla_start
             │ │   └─241226 /usr/bin/python3 /usr/bin/nova-compute
             │ ├─libpod-d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed.scope
             │ │ └─container
             │ │   ├─144649 dumb-init --single-child -- kolla_start
             │ │   └─144652 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
             │ └─libpod-f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6.scope
             │   └─container
             │     ├─223788 dumb-init --single-child -- kolla_start
             │     └─223792 /usr/sbin/multipathd -d
             ├─system.slice
             │ ├─NetworkManager.service
             │ │ └─48896 /usr/sbin/NetworkManager --no-daemon
             │ ├─auditd.service
             │ │ ├─673 /sbin/auditd
             │ │ └─675 /usr/sbin/sedispatch
             │ ├─chronyd.service
             │ │ └─58459 /usr/sbin/chronyd -F 2
             │ ├─crond.service
             │ │ ├─  968 /usr/sbin/crond -n
             │ │ └─30916 /usr/sbin/anacron -s
             │ ├─dbus-broker.service
             │ │ ├─727 /usr/bin/dbus-broker-launch --scope system --audit
             │ │ └─734 dbus-broker --log 4 --controller 9 --machine-id 64f1d6692049d8be5e8b216cc203502c --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             │ ├─edpm_multipathd.service
             │ │ └─223786 /usr/bin/conmon --api-version 1 -c f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6 -u f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6/userdata -p /run/containers/storage/overlay-containers/f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6/userdata/pidfile -n multipathd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6/userdata/oci-log --conmon-pidfile /run/multipathd.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6
             │ ├─edpm_nova_compute.service
             │ │ └─241222 /usr/bin/conmon --api-version 1 -c 85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306 -u 85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306/userdata -p /run/containers/storage/overlay-containers/85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306
             │ ├─edpm_ovn_controller.service
             │ │ └─144647 /usr/bin/conmon --api-version 1 -c d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed -u d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed/userdata -p /run/containers/storage/overlay-containers/d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed
             │ ├─edpm_ovn_metadata_agent.service
             │ │ └─154116 /usr/bin/conmon --api-version 1 -c 1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07 -u 1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07/userdata -p /run/containers/storage/overlay-containers/1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07
             │ ├─gssproxy.service
             │ │ └─827 /usr/sbin/gssproxy -D
             │ ├─irqbalance.service
             │ │ └─740 /usr/sbin/irqbalance
             │ ├─iscsid.service
             │ │ └─226492 /usr/sbin/iscsid -f
             │ ├─ovs-vswitchd.service
             │ │ └─47201 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             │ ├─ovsdb-server.service
             │ │ └─47120 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             │ ├─polkit.service
             │ │ └─43388 /usr/lib/polkit-1/polkitd --no-debug
             │ ├─rpcbind.service
             │ │ └─671 /usr/bin/rpcbind -w -f
             │ ├─rsyslog.service
             │ │ └─962 /usr/sbin/rsyslogd -n
             │ ├─sshd.service
             │ │ └─179217 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             │ ├─system-ceph\x2d00fdae1b\x2d7fad\x2d5f1b\x2d8734\x2dba4d9298a6de.slice
             │ │ ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@crash.compute-0.service
             │ │ │ ├─libpod-payload-8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc
             │ │ │ │ ├─79703 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ │ └─79705 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ └─runtime
             │ │ │   └─79701 /usr/bin/conmon --api-version 1 -c 8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc -u 8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc/userdata -p /run/containers/storage/overlay-containers/8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc
             │ │ ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mds.cephfs.compute-0.zwnyoz.service
             │ │ │ ├─libpod-payload-c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50
             │ │ │ │ ├─93862 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.zwnyoz -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─93864 /usr/bin/ceph-mds -n mds.cephfs.compute-0.zwnyoz -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─93860 /usr/bin/conmon --api-version 1 -c c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50 -u c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50/userdata -p /run/containers/storage/overlay-containers/c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-mds-cephfs-compute-0-zwnyoz --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mds.cephfs.compute-0.zwnyoz.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50
             │ │ ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mgr.compute-0.qsherl.service
             │ │ │ ├─libpod-payload-4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283
             │ │ │ │ ├─75198 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.qsherl -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─75200 /usr/bin/ceph-mgr -n mgr.compute-0.qsherl -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─75196 /usr/bin/conmon --api-version 1 -c 4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283 -u 4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283/userdata -p /run/containers/storage/overlay-containers/4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-mgr-compute-0-qsherl --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mgr.compute-0.qsherl.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283
             │ │ ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mon.compute-0.service
             │ │ │ ├─libpod-payload-4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a
             │ │ │ │ ├─74926 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ │ └─74928 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─74924 /usr/bin/conmon --api-version 1 -c 4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a -u 4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a/userdata -p /run/containers/storage/overlay-containers/4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a
             │ │ ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.0.service
             │ │ │ ├─libpod-payload-5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7
             │ │ │ │ ├─85138 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ │ └─85140 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ └─runtime
             │ │ │   └─85136 /usr/bin/conmon --api-version 1 -c 5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7 -u 5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7/userdata -p /run/containers/storage/overlay-containers/5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7
             │ │ ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.1.service
             │ │ │ ├─libpod-payload-c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03
             │ │ │ │ ├─86140 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ │ └─86142 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ └─runtime
             │ │ │   └─86138 /usr/bin/conmon --api-version 1 -c c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03 -u c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03/userdata -p /run/containers/storage/overlay-containers/c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03
             │ │ ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.2.service
             │ │ │ ├─libpod-payload-bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a
             │ │ │ │ ├─87153 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ │ └─87155 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ └─runtime
             │ │ │   └─87151 /usr/bin/conmon --api-version 1 -c bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a -u bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a/userdata -p /run/containers/storage/overlay-containers/bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a
             │ │ └─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@rgw.rgw.compute-0.kikquh.service
             │ │   ├─libpod-payload-69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c
             │ │   │ ├─93474 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.kikquh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
             │ │   │ └─93487 /usr/bin/radosgw -n client.rgw.rgw.compute-0.kikquh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
             │ │   └─runtime
             │ │     └─93465 /usr/bin/conmon --api-version 1 -c 69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c -u 69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c/userdata -p /run/containers/storage/overlay-containers/69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-rgw-rgw-compute-0-kikquh --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@rgw.rgw.compute-0.kikquh.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c
             │ ├─system-getty.slice
             │ │ └─getty@tty1.service
             │ │   └─978 /sbin/agetty -o "-p -- \\u" --noclear - linux
             │ ├─system-serial\x2dgetty.slice
             │ │ └─serial-getty@ttyS0.service
             │ │   └─984 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             │ ├─systemd-hostnamed.service
             │ │ └─249805 /usr/lib/systemd/systemd-hostnamed
             │ ├─systemd-journald.service
             │ │ └─650 /usr/lib/systemd/systemd-journald
             │ ├─systemd-logind.service
             │ │ └─745 /usr/lib/systemd/systemd-logind
             │ ├─systemd-machined.service
             │ │ └─204630 /usr/lib/systemd/systemd-machined
             │ ├─systemd-udevd.service
             │ │ └─udev
             │ │   └─701 /usr/lib/systemd/systemd-udevd
             │ ├─tuned.service
             │ │ ├─105788 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             │ │ ├─253212 /usr/bin/sh - /usr/sbin/virt-what
             │ │ └─253217 
             │ ├─virtlogd.service
             │ │ └─204000 /usr/sbin/virtlogd
             │ ├─virtnodedevd.service
             │ │ └─240159 /usr/sbin/virtnodedevd --timeout 120
             │ └─virtqemud.service
             │   └─241006 /usr/sbin/virtqemud --timeout 120
             └─user.slice
               ├─user-1000.slice
               │ ├─session-1.scope
               │ │ └─4579 /usr/bin/python3
               │ ├─session-54.scope
               │ │ ├─246077 "sshd-session: zuul [priv]"
               │ │ ├─246080 "sshd-session: zuul@notty"
               │ │ ├─246081 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
               │ │ ├─246105 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
               │ │ ├─252668 timeout 15s turbostat --debug sleep 10
               │ │ ├─253143 timeout 300s systemctl status --all
               │ │ ├─253144 systemctl status --all
               │ │ ├─253187 timeout 300s ceph osd pool autoscale-status --format json-pretty
               │ │ ├─253188 /usr/bin/python3 -s /usr/bin/ceph osd pool autoscale-status --format json-pretty
               │ │ ├─253210 timeout 300s tuned-adm recommend
               │ │ └─253211 /usr/bin/python3 -Es /usr/sbin/tuned-adm recommend
               │ └─user@1000.service
               │   ├─app.slice
               │   │ └─dbus-broker.service
               │   │   ├─14044 /usr/bin/dbus-broker-launch --scope user
               │   │   └─14045 dbus-broker --log 4 --controller 9 --machine-id 64f1d6692049d8be5e8b216cc203502c --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               │   ├─init.scope
               │   │ ├─4373 /usr/lib/systemd/systemd --user
               │   │ └─4375 "(sd-pam)"
               │   └─user.slice
               │     └─podman-pause-1c7804ac.scope
               │       └─14027 catatonit -P
               └─user-42477.slice
                 ├─session-20.scope
                 │ ├─76206 "sshd-session: ceph-admin [priv]"
                 │ └─76226 "sshd-session: ceph-admin"
                 ├─session-22.scope
                 │ ├─76223 "sshd-session: ceph-admin [priv]"
                 │ └─76228 "sshd-session: ceph-admin@notty"
                 ├─session-23.scope
                 │ ├─76254 "sshd-session: ceph-admin [priv]"
                 │ └─76257 "sshd-session: ceph-admin@notty"
                 ├─session-24.scope
                 │ ├─76283 "sshd-session: ceph-admin [priv]"
                 │ └─76286 "sshd-session: ceph-admin@notty"
                 ├─session-25.scope
                 │ ├─76312 "sshd-session: ceph-admin [priv]"
                 │ └─76315 "sshd-session: ceph-admin@notty"
                 ├─session-26.scope
                 │ ├─76341 "sshd-session: ceph-admin [priv]"
                 │ └─76344 "sshd-session: ceph-admin@notty"
                 ├─session-27.scope
                 │ ├─76370 "sshd-session: ceph-admin [priv]"
                 │ └─76373 "sshd-session: ceph-admin@notty"
                 ├─session-28.scope
                 │ ├─76399 "sshd-session: ceph-admin [priv]"
                 │ └─76402 "sshd-session: ceph-admin@notty"
                 ├─session-29.scope
                 │ ├─76428 "sshd-session: ceph-admin [priv]"
                 │ └─76431 "sshd-session: ceph-admin@notty"
                 ├─session-30.scope
                 │ ├─76457 "sshd-session: ceph-admin [priv]"
                 │ └─76460 "sshd-session: ceph-admin@notty"
                 ├─session-31.scope
                 │ ├─76484 "sshd-session: ceph-admin [priv]"
                 │ └─76487 "sshd-session: ceph-admin@notty"
                 ├─session-32.scope
                 │ ├─76513 "sshd-session: ceph-admin [priv]"
                 │ └─76516 "sshd-session: ceph-admin@notty"
                 └─user@42477.service
                   └─init.scope
                     ├─76210 /usr/lib/systemd/systemd --user
                     └─76212 "(sd-pam)"

Dec 13 07:36:36 compute-0 systemd[1]: Started libpod-conmon-7ef045a8093e37ddfe81fb914959b324beccf6e9409d5e86e4bc2d506d3e7a83.scope.
Dec 13 07:36:36 compute-0 systemd[1]: Started libcrun container.
Dec 13 07:36:36 compute-0 systemd[1]: libpod-7ef045a8093e37ddfe81fb914959b324beccf6e9409d5e86e4bc2d506d3e7a83.scope: Deactivated successfully.
Dec 13 07:36:36 compute-0 systemd[1]: var-lib-containers-storage-overlay-bc7b8449204da5322c2256cbdb7e34c3cd08a36bac73ce34ddd7064160553db4-merged.mount: Deactivated successfully.
Dec 13 07:36:36 compute-0 systemd[1]: libpod-conmon-7ef045a8093e37ddfe81fb914959b324beccf6e9409d5e86e4bc2d506d3e7a83.scope: Deactivated successfully.
Dec 13 07:36:37 compute-0 systemd[1]: Started libpod-conmon-404541e0807a459ae6ec7f22f774933b836db8dd393ed1f5af5f1eac4d23db94.scope.
Dec 13 07:36:37 compute-0 systemd[1]: Started libcrun container.
Dec 13 07:36:37 compute-0 systemd[1]: libpod-404541e0807a459ae6ec7f22f774933b836db8dd393ed1f5af5f1eac4d23db94.scope: Deactivated successfully.
Dec 13 07:36:37 compute-0 systemd[1]: var-lib-containers-storage-overlay-3bf735cd2afa77a110740d8bea3c1686099be5dc68bd2760ad55048f93416a7e-merged.mount: Deactivated successfully.
Dec 13 07:36:37 compute-0 systemd[1]: libpod-conmon-404541e0807a459ae6ec7f22f774933b836db8dd393ed1f5af5f1eac4d23db94.scope: Deactivated successfully.

● machine.slice - Virtual Machine and Container Slice
     Loaded: loaded (/usr/lib/systemd/system/machine.slice; static)
     Active: active since Sat 2025-12-13 07:13:18 UTC; 23min ago
      Until: Sat 2025-12-13 07:13:18 UTC; 23min ago
       Docs: man:systemd.special(7)
         IO: 542.7M read, 19.4M written
      Tasks: 42
     Memory: 428.0M (peak: 480.4M)
        CPU: 1min 36.466s
     CGroup: /machine.slice
             ├─libpod-1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07.scope
             │ └─container
             │   ├─154118 dumb-init --single-child -- kolla_start
             │   ├─154121 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │   ├─154224 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │   └─154229 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpj55srbhp/privsep.sock
             ├─libpod-85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306.scope
             │ └─container
             │   ├─241224 dumb-init --single-child -- kolla_start
             │   └─241226 /usr/bin/python3 /usr/bin/nova-compute
             ├─libpod-d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed.scope
             │ └─container
             │   ├─144649 dumb-init --single-child -- kolla_start
             │   └─144652 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
             └─libpod-f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6.scope
               └─container
                 ├─223788 dumb-init --single-child -- kolla_start
                 └─223792 /usr/sbin/multipathd -d

Dec 13 07:36:36 compute-0 reverent_shamir[249686]:                 "ceph.with_tpm": "0"
Dec 13 07:36:36 compute-0 reverent_shamir[249686]:             },
Dec 13 07:36:36 compute-0 reverent_shamir[249686]:             "type": "block",
Dec 13 07:36:36 compute-0 reverent_shamir[249686]:             "vg_name": "ceph_vg2"
Dec 13 07:36:36 compute-0 reverent_shamir[249686]:         }
Dec 13 07:36:36 compute-0 reverent_shamir[249686]:     ]
Dec 13 07:36:36 compute-0 reverent_shamir[249686]: }
Dec 13 07:36:36 compute-0 blissful_austin[249880]: 167 167
Dec 13 07:36:36 compute-0 conmon[249880]: conmon 7ef045a8093e37ddfe81 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-7ef045a8093e37ddfe81fb914959b324beccf6e9409d5e86e4bc2d506d3e7a83.scope/container/memory.events
Dec 13 07:36:37 compute-0 blissful_aryabhata[249952]: {}

● system-ceph\x2d00fdae1b\x2d7fad\x2d5f1b\x2d8734\x2dba4d9298a6de.slice - Slice /system/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de
     Loaded: loaded
     Active: active since Sat 2025-12-13 07:13:21 UTC; 23min ago
      Until: Sat 2025-12-13 07:13:21 UTC; 23min ago
         IO: 22.0M read, 3.8G written
      Tasks: 1002
     Memory: 2.0G (peak: 2.1G)
        CPU: 1min 5.166s
     CGroup: /system.slice/system-ceph\x2d00fdae1b\x2d7fad\x2d5f1b\x2d8734\x2dba4d9298a6de.slice
             ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@crash.compute-0.service
             │ ├─libpod-payload-8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc
             │ │ ├─79703 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ └─79705 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ └─runtime
             │   └─79701 /usr/bin/conmon --api-version 1 -c 8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc -u 8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc/userdata -p /run/containers/storage/overlay-containers/8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc
             ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mds.cephfs.compute-0.zwnyoz.service
             │ ├─libpod-payload-c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50
             │ │ ├─93862 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.zwnyoz -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─93864 /usr/bin/ceph-mds -n mds.cephfs.compute-0.zwnyoz -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─93860 /usr/bin/conmon --api-version 1 -c c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50 -u c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50/userdata -p /run/containers/storage/overlay-containers/c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-mds-cephfs-compute-0-zwnyoz --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mds.cephfs.compute-0.zwnyoz.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50
             ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mgr.compute-0.qsherl.service
             │ ├─libpod-payload-4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283
             │ │ ├─75198 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.qsherl -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─75200 /usr/bin/ceph-mgr -n mgr.compute-0.qsherl -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─75196 /usr/bin/conmon --api-version 1 -c 4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283 -u 4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283/userdata -p /run/containers/storage/overlay-containers/4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-mgr-compute-0-qsherl --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mgr.compute-0.qsherl.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283
             ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mon.compute-0.service
             │ ├─libpod-payload-4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a
             │ │ ├─74926 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─74928 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─runtime
             │   └─74924 /usr/bin/conmon --api-version 1 -c 4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a -u 4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a/userdata -p /run/containers/storage/overlay-containers/4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a
             ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.0.service
             │ ├─libpod-payload-5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7
             │ │ ├─85138 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ └─85140 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ └─runtime
             │   └─85136 /usr/bin/conmon --api-version 1 -c 5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7 -u 5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7/userdata -p /run/containers/storage/overlay-containers/5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7
             ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.1.service
             │ ├─libpod-payload-c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03
             │ │ ├─86140 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ └─86142 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ └─runtime
             │   └─86138 /usr/bin/conmon --api-version 1 -c c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03 -u c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03/userdata -p /run/containers/storage/overlay-containers/c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03
             ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.2.service
             │ ├─libpod-payload-bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a
             │ │ ├─87153 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ └─87155 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ └─runtime
             │   └─87151 /usr/bin/conmon --api-version 1 -c bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a -u bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a/userdata -p /run/containers/storage/overlay-containers/bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a
             └─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@rgw.rgw.compute-0.kikquh.service
               ├─libpod-payload-69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c
               │ ├─93474 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.kikquh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
               │ └─93487 /usr/bin/radosgw -n client.rgw.rgw.compute-0.kikquh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
               └─runtime
                 └─93465 /usr/bin/conmon --api-version 1 -c 69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c -u 69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c/userdata -p /run/containers/storage/overlay-containers/69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-rgw-rgw-compute-0-kikquh --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@rgw.rgw.compute-0.kikquh.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c

Dec 13 07:36:50 compute-0 ceph-mon[74928]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd dump", "format": "json-pretty"} v 0)
Dec 13 07:36:50 compute-0 ceph-mon[74928]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2657197604' entity='client.admin' cmd={"prefix": "osd dump", "format": "json-pretty"} : dispatch
Dec 13 07:36:50 compute-0 ceph-mgr[75200]: log_channel(cluster) log [DBG] : pgmap v802: 321 pgs: 321 active+clean; 461 KiB data, 136 MiB used, 60 GiB / 60 GiB avail
Dec 13 07:36:50 compute-0 ceph-mon[74928]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd numa-status", "format": "json-pretty"} v 0)
Dec 13 07:36:50 compute-0 ceph-mon[74928]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1044118940' entity='client.admin' cmd={"prefix": "osd numa-status", "format": "json-pretty"} : dispatch
Dec 13 07:36:50 compute-0 ceph-mon[74928]: from='client.14616 -' entity='client.admin' cmd=[{"prefix": "osd df", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Dec 13 07:36:50 compute-0 ceph-mon[74928]: from='client.? 192.168.122.100:0/2657197604' entity='client.admin' cmd={"prefix": "osd dump", "format": "json-pretty"} : dispatch
Dec 13 07:36:50 compute-0 ceph-mon[74928]: pgmap v802: 321 pgs: 321 active+clean; 461 KiB data, 136 MiB used, 60 GiB / 60 GiB avail
Dec 13 07:36:50 compute-0 ceph-mon[74928]: from='client.? 192.168.122.100:0/1044118940' entity='client.admin' cmd={"prefix": "osd numa-status", "format": "json-pretty"} : dispatch
Dec 13 07:36:51 compute-0 ceph-mgr[75200]: log_channel(audit) log [DBG] : from='client.14622 -' entity='client.admin' cmd=[{"prefix": "osd perf", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch

● system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice - Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged
     Loaded: loaded
     Active: active since Sat 2025-12-13 07:26:41 UTC; 10min ago
      Until: Sat 2025-12-13 07:26:41 UTC; 10min ago
         IO: 8.0K read, 0B written
      Tasks: 0
     Memory: 12.0K (peak: 58.7M)
        CPU: 753ms
     CGroup: /system.slice/system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice

Dec 13 07:26:41 compute-0 systemd[1]: Created slice Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged.

● system-getty.slice - Slice /system/getty
     Loaded: loaded
     Active: active since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 232.0K (peak: 456.0K)
        CPU: 6ms
     CGroup: /system.slice/system-getty.slice
             └─getty@tty1.service
               └─978 /sbin/agetty -o "-p -- \\u" --noclear - linux

● system-modprobe.slice - Slice /system/modprobe
     Loaded: loaded
     Active: active since Sat 2025-12-13 06:42:08 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:08 UTC; 54min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 872.0K (peak: 11.4M)
        CPU: 110ms
     CGroup: /system.slice/system-modprobe.slice

Dec 13 06:42:08 localhost systemd[1]: Created slice Slice /system/modprobe.

● system-serial\x2dgetty.slice - Slice /system/serial-getty
     Loaded: loaded
     Active: active since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 308.0K (peak: 564.0K)
        CPU: 6ms
     CGroup: /system.slice/system-serial\x2dgetty.slice
             └─serial-getty@ttyS0.service
               └─984 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

● system-sshd\x2dkeygen.slice - Slice /system/sshd-keygen
     Loaded: loaded
     Active: active since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 0B (peak: 0B)
        CPU: 0
     CGroup: /system.slice/system-sshd\x2dkeygen.slice

● system.slice - System Slice
     Loaded: loaded
     Active: active since Sat 2025-12-13 06:42:07 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:07 UTC; 54min ago
       Docs: man:systemd.special(7)
         IO: 274.2M read, 4.0G written
      Tasks: 1101
     Memory: 2.6G (peak: 2.8G)
        CPU: 2min 48.476s
     CGroup: /system.slice
             ├─NetworkManager.service
             │ └─48896 /usr/sbin/NetworkManager --no-daemon
             ├─auditd.service
             │ ├─673 /sbin/auditd
             │ └─675 /usr/sbin/sedispatch
             ├─chronyd.service
             │ └─58459 /usr/sbin/chronyd -F 2
             ├─crond.service
             │ ├─  968 /usr/sbin/crond -n
             │ └─30916 /usr/sbin/anacron -s
             ├─dbus-broker.service
             │ ├─727 /usr/bin/dbus-broker-launch --scope system --audit
             │ └─734 dbus-broker --log 4 --controller 9 --machine-id 64f1d6692049d8be5e8b216cc203502c --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             ├─edpm_multipathd.service
             │ └─223786 /usr/bin/conmon --api-version 1 -c f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6 -u f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6/userdata -p /run/containers/storage/overlay-containers/f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6/userdata/pidfile -n multipathd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6/userdata/oci-log --conmon-pidfile /run/multipathd.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6
             ├─edpm_nova_compute.service
             │ └─241222 /usr/bin/conmon --api-version 1 -c 85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306 -u 85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306/userdata -p /run/containers/storage/overlay-containers/85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 85647c0fe69de86f6774be6672640c6d932467db58f079f97876cd7dae0a1306
             ├─edpm_ovn_controller.service
             │ └─144647 /usr/bin/conmon --api-version 1 -c d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed -u d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed/userdata -p /run/containers/storage/overlay-containers/d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed
             ├─edpm_ovn_metadata_agent.service
             │ └─154116 /usr/bin/conmon --api-version 1 -c 1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07 -u 1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07/userdata -p /run/containers/storage/overlay-containers/1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07
             ├─gssproxy.service
             │ └─827 /usr/sbin/gssproxy -D
             ├─irqbalance.service
             │ └─740 /usr/sbin/irqbalance
             ├─iscsid.service
             │ └─226492 /usr/sbin/iscsid -f
             ├─ovs-vswitchd.service
             │ └─47201 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             ├─ovsdb-server.service
             │ └─47120 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             ├─polkit.service
             │ └─43388 /usr/lib/polkit-1/polkitd --no-debug
             ├─rpcbind.service
             │ └─671 /usr/bin/rpcbind -w -f
             ├─rsyslog.service
             │ └─962 /usr/sbin/rsyslogd -n
             ├─sshd.service
             │ └─179217 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             ├─system-ceph\x2d00fdae1b\x2d7fad\x2d5f1b\x2d8734\x2dba4d9298a6de.slice
             │ ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@crash.compute-0.service
             │ │ ├─libpod-payload-8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc
             │ │ │ ├─79703 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ └─79705 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ └─runtime
             │ │   └─79701 /usr/bin/conmon --api-version 1 -c 8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc -u 8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc/userdata -p /run/containers/storage/overlay-containers/8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8e6a4f61ea03b0deb3b22f2359f4e4ace46ba5e323139a1f6359205360a9b0cc
             │ ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mds.cephfs.compute-0.zwnyoz.service
             │ │ ├─libpod-payload-c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50
             │ │ │ ├─93862 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.zwnyoz -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─93864 /usr/bin/ceph-mds -n mds.cephfs.compute-0.zwnyoz -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─93860 /usr/bin/conmon --api-version 1 -c c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50 -u c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50/userdata -p /run/containers/storage/overlay-containers/c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-mds-cephfs-compute-0-zwnyoz --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mds.cephfs.compute-0.zwnyoz.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg c65ab07d188ff0da2857402181db229477778de43e052dbfea38f8c212c47f50
             │ ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mgr.compute-0.qsherl.service
             │ │ ├─libpod-payload-4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283
             │ │ │ ├─75198 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.qsherl -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─75200 /usr/bin/ceph-mgr -n mgr.compute-0.qsherl -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─75196 /usr/bin/conmon --api-version 1 -c 4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283 -u 4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283/userdata -p /run/containers/storage/overlay-containers/4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-mgr-compute-0-qsherl --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mgr.compute-0.qsherl.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4d78867918d5dd4dba36f3a6dc6db4122866221ae6fbf48a37819c5ae84e8283
             │ ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mon.compute-0.service
             │ │ ├─libpod-payload-4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a
             │ │ │ ├─74926 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─74928 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─runtime
             │ │   └─74924 /usr/bin/conmon --api-version 1 -c 4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a -u 4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a/userdata -p /run/containers/storage/overlay-containers/4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4656a144eefb5f6bf26a1e6dd6df77bfd9faa9dce17b22c42a655c087745995a
             │ ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.0.service
             │ │ ├─libpod-payload-5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7
             │ │ │ ├─85138 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ └─85140 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ └─runtime
             │ │   └─85136 /usr/bin/conmon --api-version 1 -c 5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7 -u 5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7/userdata -p /run/containers/storage/overlay-containers/5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5e169e1385f98bf8a58844e41c31305318f100b9850e1f4defaf308d2b1dfde7
             │ ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.1.service
             │ │ ├─libpod-payload-c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03
             │ │ │ ├─86140 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ └─86142 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ └─runtime
             │ │   └─86138 /usr/bin/conmon --api-version 1 -c c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03 -u c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03/userdata -p /run/containers/storage/overlay-containers/c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg c0e0c03f97b0b2b02555f476cf4558ef6f7c2cd731350718d9262d59a0b7be03
             │ ├─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.2.service
             │ │ ├─libpod-payload-bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a
             │ │ │ ├─87153 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ └─87155 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ └─runtime
             │ │   └─87151 /usr/bin/conmon --api-version 1 -c bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a -u bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a/userdata -p /run/containers/storage/overlay-containers/bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg bb7cd2f636f6ef6017e815fa4141bf45b494cfb9652486980f5606492505725a
             │ └─ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@rgw.rgw.compute-0.kikquh.service
             │   ├─libpod-payload-69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c
             │   │ ├─93474 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.kikquh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
             │   │ └─93487 /usr/bin/radosgw -n client.rgw.rgw.compute-0.kikquh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
             │   └─runtime
             │     └─93465 /usr/bin/conmon --api-version 1 -c 69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c -u 69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c/userdata -p /run/containers/storage/overlay-containers/69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c/userdata/pidfile -n ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de-rgw-rgw-compute-0-kikquh --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c/userdata/oci-log --conmon-pidfile /run/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de@rgw.rgw.compute-0.kikquh.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 69ac193e949f4ec1c85ec7f6eeac603563ef58b890ae60370ce1d5f216a4080c
             ├─system-getty.slice
             │ └─getty@tty1.service
             │   └─978 /sbin/agetty -o "-p -- \\u" --noclear - linux
             ├─system-serial\x2dgetty.slice
             │ └─serial-getty@ttyS0.service
             │   └─984 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             ├─systemd-hostnamed.service
             │ └─249805 /usr/lib/systemd/systemd-hostnamed
             ├─systemd-journald.service
             │ └─650 /usr/lib/systemd/systemd-journald
             ├─systemd-logind.service
             │ └─745 /usr/lib/systemd/systemd-logind
             ├─systemd-machined.service
             │ └─204630 /usr/lib/systemd/systemd-machined
             ├─systemd-udevd.service
             │ └─udev
             │   └─701 /usr/lib/systemd/systemd-udevd
             ├─tuned.service
             │ ├─105788 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             │ ├─253212 /usr/bin/sh - /usr/sbin/virt-what
             │ ├─253254 
             │ └─253255 "[grep]"
             ├─virtlogd.service
             │ └─204000 /usr/sbin/virtlogd
             ├─virtnodedevd.service
             │ └─240159 /usr/sbin/virtnodedevd --timeout 120
             └─virtqemud.service
               └─241006 /usr/sbin/virtqemud --timeout 120

Dec 13 07:36:37 compute-0 lvm[250082]: VG ceph_vg2 finished
Dec 13 07:36:37 compute-0 lvm[250077]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Dec 13 07:36:37 compute-0 lvm[250077]: VG ceph_vg0 finished
Dec 13 07:36:37 compute-0 lvm[250081]: PV /dev/loop4 online, VG ceph_vg1 is complete.
Dec 13 07:36:37 compute-0 lvm[250081]: VG ceph_vg1 finished
Dec 13 07:36:37 compute-0 podman[250063]: 2025-12-13 07:36:37.920364168 +0000 UTC m=+0.179607024 container health_status d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed (image=quay.io/podified-antelope-centos9/openstack-ovn-controller@sha256:fa24ce4aa285e3632c86a53e8d0385d4c788d049da42dd06570ad9d44aae00de, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_controller, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, container_name=ovn_controller, managed_by=edpm_ansible, org.label-schema.build-date=20251202, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller@sha256:fa24ce4aa285e3632c86a53e8d0385d4c788d049da42dd06570ad9d44aae00de', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)
Dec 13 07:36:41 compute-0 ovn_metadata_agent[154116]: 2025-12-13 07:36:41.646 154121 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Dec 13 07:36:41 compute-0 ovn_metadata_agent[154116]: 2025-12-13 07:36:41.647 154121 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Dec 13 07:36:41 compute-0 ovn_metadata_agent[154116]: 2025-12-13 07:36:41.647 154121 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Dec 13 07:36:48 compute-0 podman[252940]: 2025-12-13 07:36:48.720964148 +0000 UTC m=+0.056178043 container health_status f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6 (image=quay.io/podified-antelope-centos9/openstack-multipathd@sha256:df38dbd6b3eccec2abaa8e3618a385405ccec1b73ae8c3573a138b0c961ed31f, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd@sha256:df38dbd6b3eccec2abaa8e3618a385405ccec1b73ae8c3573a138b0c961ed31f', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, container_name=multipathd, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.build-date=20251202, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, config_id=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_managed=true)

● user-1000.slice - User Slice of UID 1000
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Sat 2025-12-13 06:42:43 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:43 UTC; 54min ago
       Docs: man:user@.service(5)
         IO: 574.0M read, 8.4G written
      Tasks: 37 (limit: 20034)
     Memory: 3.5G (peak: 4.1G)
        CPU: 16min 10.726s
     CGroup: /user.slice/user-1000.slice
             ├─session-1.scope
             │ └─4579 /usr/bin/python3
             ├─session-54.scope
             │ ├─246077 "sshd-session: zuul [priv]"
             │ ├─246080 "sshd-session: zuul@notty"
             │ ├─246081 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ ├─246105 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ ├─252668 timeout 15s turbostat --debug sleep 10
             │ ├─253143 timeout 300s systemctl status --all
             │ ├─253144 systemctl status --all
             │ ├─253187 timeout 300s ceph osd pool autoscale-status --format json-pretty
             │ ├─253188 /usr/bin/python3 -s /usr/bin/ceph osd pool autoscale-status --format json-pretty
             │ ├─253210 timeout 300s tuned-adm recommend
             │ └─253211 /usr/bin/python3 -Es /usr/sbin/tuned-adm recommend
             └─user@1000.service
               ├─app.slice
               │ └─dbus-broker.service
               │   ├─14044 /usr/bin/dbus-broker-launch --scope user
               │   └─14045 dbus-broker --log 4 --controller 9 --machine-id 64f1d6692049d8be5e8b216cc203502c --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               ├─init.scope
               │ ├─4373 /usr/lib/systemd/systemd --user
               │ └─4375 "(sd-pam)"
               └─user.slice
                 └─podman-pause-1c7804ac.scope
                   └─14027 catatonit -P

Dec 13 07:29:18 compute-0 python3.9[241385]: ansible-containers.podman.podman_container PODMAN-CONTAINER-DEBUG: podman start nova_compute_init
Dec 13 07:29:18 compute-0 sudo[241383]: pam_unix(sudo:session): session closed for user root
Dec 13 07:29:18 compute-0 sshd-session[212095]: Connection closed by 192.168.122.30 port 52180
Dec 13 07:29:18 compute-0 sshd-session[212092]: pam_unix(sshd:session): session closed for user zuul
Dec 13 07:36:09 compute-0 sudo[246081]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Dec 13 07:36:09 compute-0 sudo[246081]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Dec 13 07:36:18 compute-0 ovs-vsctl[246408]: ovs|00001|db_ctl_base|ERR|no key "dpdk-init" in Open_vSwitch record "." column other_config
Dec 13 07:36:43 compute-0 ovs-appctl[251847]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Dec 13 07:36:43 compute-0 ovs-appctl[251853]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Dec 13 07:36:43 compute-0 ovs-appctl[251862]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

● user-42477.slice - User Slice of UID 42477
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Sat 2025-12-13 07:13:43 UTC; 23min ago
      Until: Sat 2025-12-13 07:13:43 UTC; 23min ago
       Docs: man:user@.service(5)
         IO: 2.7M read, 94.4M written
      Tasks: 26 (limit: 20034)
     Memory: 28.2M (peak: 80.4M)
        CPU: 1min 19.481s
     CGroup: /user.slice/user-42477.slice
             ├─session-20.scope
             │ ├─76206 "sshd-session: ceph-admin [priv]"
             │ └─76226 "sshd-session: ceph-admin"
             ├─session-22.scope
             │ ├─76223 "sshd-session: ceph-admin [priv]"
             │ └─76228 "sshd-session: ceph-admin@notty"
             ├─session-23.scope
             │ ├─76254 "sshd-session: ceph-admin [priv]"
             │ └─76257 "sshd-session: ceph-admin@notty"
             ├─session-24.scope
             │ ├─76283 "sshd-session: ceph-admin [priv]"
             │ └─76286 "sshd-session: ceph-admin@notty"
             ├─session-25.scope
             │ ├─76312 "sshd-session: ceph-admin [priv]"
             │ └─76315 "sshd-session: ceph-admin@notty"
             ├─session-26.scope
             │ ├─76341 "sshd-session: ceph-admin [priv]"
             │ └─76344 "sshd-session: ceph-admin@notty"
             ├─session-27.scope
             │ ├─76370 "sshd-session: ceph-admin [priv]"
             │ └─76373 "sshd-session: ceph-admin@notty"
             ├─session-28.scope
             │ ├─76399 "sshd-session: ceph-admin [priv]"
             │ └─76402 "sshd-session: ceph-admin@notty"
             ├─session-29.scope
             │ ├─76428 "sshd-session: ceph-admin [priv]"
             │ └─76431 "sshd-session: ceph-admin@notty"
             ├─session-30.scope
             │ ├─76457 "sshd-session: ceph-admin [priv]"
             │ └─76460 "sshd-session: ceph-admin@notty"
             ├─session-31.scope
             │ ├─76484 "sshd-session: ceph-admin [priv]"
             │ └─76487 "sshd-session: ceph-admin@notty"
             ├─session-32.scope
             │ ├─76513 "sshd-session: ceph-admin [priv]"
             │ └─76516 "sshd-session: ceph-admin@notty"
             └─user@42477.service
               └─init.scope
                 ├─76210 /usr/lib/systemd/systemd --user
                 └─76212 "(sd-pam)"

Dec 13 07:36:37 compute-0 podman[249934]: 2025-12-13 07:36:37.1878562 +0000 UTC m=+0.112081716 container init 404541e0807a459ae6ec7f22f774933b836db8dd393ed1f5af5f1eac4d23db94 (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=blissful_aryabhata, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=tentacle, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20251030, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489)
Dec 13 07:36:37 compute-0 podman[249934]: 2025-12-13 07:36:37.09598201 +0000 UTC m=+0.020207506 image pull 524f3da276461682bec27427fb8a63b5139c40ad4185939aede197474a6817b3 quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86
Dec 13 07:36:37 compute-0 podman[249934]: 2025-12-13 07:36:37.200129848 +0000 UTC m=+0.124355344 container start 404541e0807a459ae6ec7f22f774933b836db8dd393ed1f5af5f1eac4d23db94 (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=blissful_aryabhata, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, CEPH_REF=tentacle, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.build-date=20251030, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Dec 13 07:36:37 compute-0 podman[249934]: 2025-12-13 07:36:37.201229926 +0000 UTC m=+0.125455422 container attach 404541e0807a459ae6ec7f22f774933b836db8dd393ed1f5af5f1eac4d23db94 (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=blissful_aryabhata, ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.build-date=20251030, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=tentacle)
Dec 13 07:36:37 compute-0 podman[249934]: 2025-12-13 07:36:37.806201108 +0000 UTC m=+0.730426605 container died 404541e0807a459ae6ec7f22f774933b836db8dd393ed1f5af5f1eac4d23db94 (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=blissful_aryabhata, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251030, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=tentacle, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Dec 13 07:36:37 compute-0 podman[249934]: 2025-12-13 07:36:37.857963317 +0000 UTC m=+0.782188813 container remove 404541e0807a459ae6ec7f22f774933b836db8dd393ed1f5af5f1eac4d23db94 (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=blissful_aryabhata, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.license=GPLv2, CEPH_REF=tentacle, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, org.label-schema.build-date=20251030)
Dec 13 07:36:37 compute-0 sudo[249775]: pam_unix(sudo:session): session closed for user root
Dec 13 07:36:38 compute-0 sudo[250109]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Dec 13 07:36:38 compute-0 sudo[250109]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 13 07:36:38 compute-0 sudo[250109]: pam_unix(sudo:session): session closed for user root

● user.slice - User and Session Slice
     Loaded: loaded (/usr/lib/systemd/system/user.slice; static)
     Active: active since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd.special(7)
         IO: 576.8M read, 8.5G written
      Tasks: 63
     Memory: 3.5G (peak: 4.1G)
        CPU: 17min 30.615s
     CGroup: /user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4579 /usr/bin/python3
             │ ├─session-54.scope
             │ │ ├─246077 "sshd-session: zuul [priv]"
             │ │ ├─246080 "sshd-session: zuul@notty"
             │ │ ├─246081 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─246105 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─252668 timeout 15s turbostat --debug sleep 10
             │ │ ├─253143 timeout 300s systemctl status --all
             │ │ ├─253144 systemctl status --all
             │ │ ├─253187 timeout 300s ceph osd pool autoscale-status --format json-pretty
             │ │ ├─253188 /usr/bin/python3 -s /usr/bin/ceph osd pool autoscale-status --format json-pretty
             │ │ ├─253210 timeout 300s tuned-adm recommend
             │ │ └─253211 /usr/bin/python3 -Es /usr/sbin/tuned-adm recommend
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─14044 /usr/bin/dbus-broker-launch --scope user
             │   │   └─14045 dbus-broker --log 4 --controller 9 --machine-id 64f1d6692049d8be5e8b216cc203502c --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4373 /usr/lib/systemd/systemd --user
             │   │ └─4375 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-1c7804ac.scope
             │       └─14027 catatonit -P
             └─user-42477.slice
               ├─session-20.scope
               │ ├─76206 "sshd-session: ceph-admin [priv]"
               │ └─76226 "sshd-session: ceph-admin"
               ├─session-22.scope
               │ ├─76223 "sshd-session: ceph-admin [priv]"
               │ └─76228 "sshd-session: ceph-admin@notty"
               ├─session-23.scope
               │ ├─76254 "sshd-session: ceph-admin [priv]"
               │ └─76257 "sshd-session: ceph-admin@notty"
               ├─session-24.scope
               │ ├─76283 "sshd-session: ceph-admin [priv]"
               │ └─76286 "sshd-session: ceph-admin@notty"
               ├─session-25.scope
               │ ├─76312 "sshd-session: ceph-admin [priv]"
               │ └─76315 "sshd-session: ceph-admin@notty"
               ├─session-26.scope
               │ ├─76341 "sshd-session: ceph-admin [priv]"
               │ └─76344 "sshd-session: ceph-admin@notty"
               ├─session-27.scope
               │ ├─76370 "sshd-session: ceph-admin [priv]"
               │ └─76373 "sshd-session: ceph-admin@notty"
               ├─session-28.scope
               │ ├─76399 "sshd-session: ceph-admin [priv]"
               │ └─76402 "sshd-session: ceph-admin@notty"
               ├─session-29.scope
               │ ├─76428 "sshd-session: ceph-admin [priv]"
               │ └─76431 "sshd-session: ceph-admin@notty"
               ├─session-30.scope
               │ ├─76457 "sshd-session: ceph-admin [priv]"
               │ └─76460 "sshd-session: ceph-admin@notty"
               ├─session-31.scope
               │ ├─76484 "sshd-session: ceph-admin [priv]"
               │ └─76487 "sshd-session: ceph-admin@notty"
               ├─session-32.scope
               │ ├─76513 "sshd-session: ceph-admin [priv]"
               │ └─76516 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─76210 /usr/lib/systemd/systemd --user
                   └─76212 "(sd-pam)"

● dbus.socket - D-Bus System Message Bus Socket
     Loaded: loaded (/usr/lib/systemd/system/dbus.socket; enabled; preset: enabled)
     Active: active (running) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
   Triggers: ● dbus-broker.service
     Listen: /run/dbus/system_bus_socket (Stream)
     CGroup: /system.slice/dbus.socket

Dec 13 06:42:10 localhost systemd[1]: Listening on D-Bus System Message Bus Socket.

● dm-event.socket - Device-mapper event daemon FIFOs
     Loaded: loaded (/usr/lib/systemd/system/dm-event.socket; enabled; preset: enabled)
     Active: active (listening) since Sat 2025-12-13 07:07:56 UTC; 28min ago
      Until: Sat 2025-12-13 07:07:56 UTC; 28min ago
   Triggers: ● dm-event.service
       Docs: man:dmeventd(8)
     Listen: /run/dmeventd-server (FIFO)
             /run/dmeventd-client (FIFO)
     CGroup: /system.slice/dm-event.socket

Dec 13 07:07:56 compute-0 systemd[1]: Listening on Device-mapper event daemon FIFOs.

● iscsid.socket - Open-iSCSI iscsid Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsid.socket; enabled; preset: enabled)
     Active: active (running) since Sat 2025-12-13 07:27:27 UTC; 9min ago
      Until: Sat 2025-12-13 07:27:27 UTC; 9min ago
   Triggers: ● iscsid.service
       Docs: man:iscsid(8)
             man:iscsiadm(8)
     Listen: @ISCSIADM_ABSTRACT_NAMESPACE (Stream)
     CGroup: /system.slice/iscsid.socket

Dec 13 07:27:27 compute-0 systemd[1]: Listening on Open-iSCSI iscsid Socket.

○ iscsiuio.socket - Open-iSCSI iscsiuio Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● iscsiuio.service
       Docs: man:iscsiuio(8)
     Listen: @ISCSID_UIP_ABSTRACT_NAMESPACE (Stream)

○ libvirtd-admin.socket - libvirt legacy monolithic daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-admin.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)

○ libvirtd-ro.socket - libvirt legacy monolithic daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-ro.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)

○ libvirtd-tls.socket
     Loaded: masked (Reason: Unit libvirtd-tls.socket is masked.)
     Active: inactive (dead)

○ libvirtd.socket - libvirt legacy monolithic daemon socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock (Stream)

● lvm2-lvmpolld.socket - LVM2 poll daemon socket
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.socket; enabled; preset: enabled)
     Active: active (listening) since Sat 2025-12-13 07:07:56 UTC; 28min ago
      Until: Sat 2025-12-13 07:07:56 UTC; 28min ago
   Triggers: ● lvm2-lvmpolld.service
       Docs: man:lvmpolld(8)
     Listen: /run/lvm/lvmpolld.socket (Stream)
     CGroup: /system.slice/lvm2-lvmpolld.socket

Dec 13 07:07:56 compute-0 systemd[1]: Listening on LVM2 poll daemon socket.

● rpcbind.socket - RPCbind Server Activation Socket
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.socket; enabled; preset: enabled)
     Active: active (running) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
   Triggers: ● rpcbind.service
     Listen: /run/rpcbind.sock (Stream)
             0.0.0.0:111 (Stream)
             0.0.0.0:111 (Datagram)
             [::]:111 (Stream)
             [::]:111 (Datagram)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48568)
     Memory: 16.0K (peak: 288.0K)
        CPU: 3ms
     CGroup: /system.slice/rpcbind.socket

● sssd-kcm.socket - SSSD Kerberos Cache Manager responder socket
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.socket; enabled; preset: enabled)
     Active: active (listening) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
   Triggers: ● sssd-kcm.service
       Docs: man:sssd-kcm(8)
     Listen: /run/.heim_org.h5l.kcm-socket (Stream)
     CGroup: /system.slice/sssd-kcm.socket

Dec 13 06:42:10 localhost systemd[1]: Listening on SSSD Kerberos Cache Manager responder socket.

○ syslog.socket - Syslog Socket
     Loaded: loaded (/usr/lib/systemd/system/syslog.socket; static)
     Active: inactive (dead)
   Triggers: ● syslog.service
       Docs: man:systemd.special(7)
             https://www.freedesktop.org/wiki/Software/systemd/syslog
     Listen: /run/systemd/journal/syslog (Datagram)

● systemd-coredump.socket - Process Core Dump Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-coredump.socket; static)
     Active: active (listening) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd-coredump(8)
     Listen: /run/systemd/coredump (SequentialPacket)
   Accepted: 0; Connected: 0;
     CGroup: /system.slice/systemd-coredump.socket

● systemd-initctl.socket - initctl Compatibility Named Pipe
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.socket; static)
     Active: active (listening) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
   Triggers: ● systemd-initctl.service
       Docs: man:systemd-initctl.socket(8)
     Listen: /run/initctl (FIFO)
     CGroup: /system.slice/systemd-initctl.socket

● systemd-journald-dev-log.socket - Journal Socket (/dev/log)
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald-dev-log.socket; static)
     Active: active (running) since Sat 2025-12-13 06:42:07 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:07 UTC; 54min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/dev-log (Datagram)
     CGroup: /system.slice/systemd-journald-dev-log.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-journald.socket - Journal Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.socket; static)
     Active: active (running) since Sat 2025-12-13 06:42:07 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:07 UTC; 54min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/socket (Datagram)
             /run/systemd/journal/stdout (Stream)
     CGroup: /system.slice/systemd-journald.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-rfkill.socket - Load/Save RF Kill Switch Status /dev/rfkill Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.socket; static)
     Active: active (listening) since Sat 2025-12-13 06:42:11 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:11 UTC; 54min ago
   Triggers: ● systemd-rfkill.service
       Docs: man:systemd-rfkill.socket(8)
     Listen: /dev/rfkill (Special)
     CGroup: /system.slice/systemd-rfkill.socket

Dec 13 06:42:11 np0005558317 systemd[1]: Listening on Load/Save RF Kill Switch Status /dev/rfkill Watch.

● systemd-udevd-control.socket - udev Control Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-control.socket; static)
     Active: active (running) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-control.socket(8)
             man:udev(7)
     Listen: /run/udev/control (SequentialPacket)
     CGroup: /system.slice/systemd-udevd-control.socket

● systemd-udevd-kernel.socket - udev Kernel Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-kernel.socket; static)
     Active: active (running) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-kernel.socket(8)
             man:udev(7)
     Listen: kobject-uevent 1 (Netlink)
     CGroup: /system.slice/systemd-udevd-kernel.socket

○ virtinterfaced-admin.socket - libvirt interface daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-admin-sock (Stream)

○ virtinterfaced-ro.socket - libvirt interface daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock-ro (Stream)

○ virtinterfaced.socket - libvirt interface daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock (Stream)

○ virtlockd-admin.socket - libvirt locking daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-admin-sock (Stream)

● virtlockd.socket - libvirt locking daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.socket; enabled; preset: enabled)
     Active: active (listening) since Sat 2025-12-13 07:26:42 UTC; 10min ago
      Until: Sat 2025-12-13 07:26:42 UTC; 10min ago
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-sock (Stream)
     CGroup: /system.slice/virtlockd.socket

Dec 13 07:26:42 compute-0 systemd[1]: Listening on libvirt locking daemon socket.

● virtlogd-admin.socket - libvirt logging daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd-admin.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-12-13 07:26:39 UTC; 10min ago
      Until: Sat 2025-12-13 07:26:39 UTC; 10min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48568)
     Memory: 0B (peak: 512.0K)
        CPU: 1ms
     CGroup: /system.slice/virtlogd-admin.socket

Dec 13 07:26:39 compute-0 systemd[1]: Starting libvirt logging daemon admin socket...
Dec 13 07:26:39 compute-0 systemd[1]: Listening on libvirt logging daemon admin socket.

● virtlogd.socket - libvirt logging daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-12-13 07:26:39 UTC; 10min ago
      Until: Sat 2025-12-13 07:26:39 UTC; 10min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48568)
     Memory: 0B (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtlogd.socket

Dec 13 07:26:39 compute-0 systemd[1]: Starting libvirt logging daemon socket...
Dec 13 07:26:39 compute-0 systemd[1]: Listening on libvirt logging daemon socket.

○ virtnetworkd-admin.socket - libvirt network daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-admin-sock (Stream)

○ virtnetworkd-ro.socket - libvirt network daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock-ro (Stream)

○ virtnetworkd.socket - libvirt network daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock (Stream)

● virtnodedevd-admin.socket - libvirt nodedev daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-admin.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-12-13 07:26:40 UTC; 10min ago
      Until: Sat 2025-12-13 07:26:40 UTC; 10min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48568)
     Memory: 4.0K (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtnodedevd-admin.socket

Dec 13 07:26:40 compute-0 systemd[1]: Starting libvirt nodedev daemon admin socket...
Dec 13 07:26:40 compute-0 systemd[1]: Listening on libvirt nodedev daemon admin socket.

● virtnodedevd-ro.socket - libvirt nodedev daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-ro.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-12-13 07:26:40 UTC; 10min ago
      Until: Sat 2025-12-13 07:26:40 UTC; 10min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48568)
     Memory: 4.0K (peak: 580.0K)
        CPU: 2ms
     CGroup: /system.slice/virtnodedevd-ro.socket

Dec 13 07:26:40 compute-0 systemd[1]: Starting libvirt nodedev daemon read-only socket...
Dec 13 07:26:40 compute-0 systemd[1]: Listening on libvirt nodedev daemon read-only socket.

● virtnodedevd.socket - libvirt nodedev daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-12-13 07:26:40 UTC; 10min ago
      Until: Sat 2025-12-13 07:26:40 UTC; 10min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48568)
     Memory: 0B (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtnodedevd.socket

Dec 13 07:26:40 compute-0 systemd[1]: Starting libvirt nodedev daemon socket...
Dec 13 07:26:40 compute-0 systemd[1]: Listening on libvirt nodedev daemon socket.

○ virtnwfilterd-admin.socket - libvirt nwfilter daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-admin-sock (Stream)

○ virtnwfilterd-ro.socket - libvirt nwfilter daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock-ro (Stream)

○ virtnwfilterd.socket - libvirt nwfilter daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock (Stream)

● virtproxyd-admin.socket - libvirt proxy daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-admin.socket.d
             └─override.conf
     Active: active (listening) since Sat 2025-12-13 07:26:41 UTC; 10min ago
      Until: Sat 2025-12-13 07:26:41 UTC; 10min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48568)
     Memory: 4.0K (peak: 500.0K)
        CPU: 2ms
     CGroup: /system.slice/virtproxyd-admin.socket

Dec 13 07:26:41 compute-0 systemd[1]: Starting libvirt proxy daemon admin socket...
Dec 13 07:26:41 compute-0 systemd[1]: Listening on libvirt proxy daemon admin socket.

● virtproxyd-ro.socket - libvirt proxy daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-ro.socket.d
             └─override.conf
     Active: active (listening) since Sat 2025-12-13 07:26:41 UTC; 10min ago
      Until: Sat 2025-12-13 07:26:41 UTC; 10min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48568)
     Memory: 0B (peak: 436.0K)
        CPU: 1ms
     CGroup: /system.slice/virtproxyd-ro.socket

Dec 13 07:26:41 compute-0 systemd[1]: Starting libvirt proxy daemon read-only socket...
Dec 13 07:26:41 compute-0 systemd[1]: Listening on libvirt proxy daemon read-only socket.

● virtproxyd-tls.socket - libvirt proxy daemon TLS IP socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-tls.socket; enabled; preset: disabled)
     Active: active (listening) since Sat 2025-12-13 07:25:56 UTC; 10min ago
      Until: Sat 2025-12-13 07:25:56 UTC; 10min ago
   Triggers: ● virtproxyd.service
     Listen: [::]:16514 (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48568)
     Memory: 8.0K (peak: 256.0K)
        CPU: 875us
     CGroup: /system.slice/virtproxyd-tls.socket

Dec 13 07:25:56 compute-0 systemd[1]: Listening on libvirt proxy daemon TLS IP socket.

● virtproxyd.socket - libvirt proxy daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd.socket.d
             └─override.conf
     Active: active (listening) since Sat 2025-12-13 07:25:56 UTC; 10min ago
      Until: Sat 2025-12-13 07:25:56 UTC; 10min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock (Stream)
     CGroup: /system.slice/virtproxyd.socket

Dec 13 07:25:56 compute-0 systemd[1]: Listening on libvirt proxy daemon socket.

● virtqemud-admin.socket - libvirt QEMU daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-admin.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-12-13 07:26:42 UTC; 10min ago
      Until: Sat 2025-12-13 07:26:42 UTC; 10min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48568)
     Memory: 0B (peak: 580.0K)
        CPU: 2ms
     CGroup: /system.slice/virtqemud-admin.socket

Dec 13 07:26:42 compute-0 systemd[1]: Starting libvirt QEMU daemon admin socket...
Dec 13 07:26:42 compute-0 systemd[1]: Listening on libvirt QEMU daemon admin socket.

● virtqemud-ro.socket - libvirt QEMU daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-ro.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-12-13 07:26:42 UTC; 10min ago
      Until: Sat 2025-12-13 07:26:42 UTC; 10min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48568)
     Memory: 4.0K (peak: 516.0K)
        CPU: 2ms
     CGroup: /system.slice/virtqemud-ro.socket

Dec 13 07:26:42 compute-0 systemd[1]: Starting libvirt QEMU daemon read-only socket...
Dec 13 07:26:42 compute-0 systemd[1]: Listening on libvirt QEMU daemon read-only socket.

● virtqemud.socket - libvirt QEMU daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-12-13 07:26:42 UTC; 10min ago
      Until: Sat 2025-12-13 07:26:42 UTC; 10min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48568)
     Memory: 0B (peak: 572.0K)
        CPU: 2ms
     CGroup: /system.slice/virtqemud.socket

Dec 13 07:26:42 compute-0 systemd[1]: Starting libvirt QEMU daemon socket...
Dec 13 07:26:42 compute-0 systemd[1]: Listening on libvirt QEMU daemon socket.

● virtsecretd-admin.socket - libvirt secret daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-admin.socket.d
             └─override.conf
     Active: active (listening) since Sat 2025-12-13 07:26:43 UTC; 10min ago
      Until: Sat 2025-12-13 07:26:43 UTC; 10min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48568)
     Memory: 0B (peak: 560.0K)
        CPU: 2ms
     CGroup: /system.slice/virtsecretd-admin.socket

Dec 13 07:26:43 compute-0 systemd[1]: Starting libvirt secret daemon admin socket...
Dec 13 07:26:43 compute-0 systemd[1]: Listening on libvirt secret daemon admin socket.

● virtsecretd-ro.socket - libvirt secret daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-ro.socket.d
             └─override.conf
     Active: active (listening) since Sat 2025-12-13 07:26:43 UTC; 10min ago
      Until: Sat 2025-12-13 07:26:43 UTC; 10min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48568)
     Memory: 0B (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtsecretd-ro.socket

Dec 13 07:26:43 compute-0 systemd[1]: Starting libvirt secret daemon read-only socket...
Dec 13 07:26:43 compute-0 systemd[1]: Listening on libvirt secret daemon read-only socket.

● virtsecretd.socket - libvirt secret daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd.socket.d
             └─override.conf
     Active: active (listening) since Sat 2025-12-13 07:26:43 UTC; 10min ago
      Until: Sat 2025-12-13 07:26:43 UTC; 10min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48568)
     Memory: 0B (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtsecretd.socket

Dec 13 07:26:43 compute-0 systemd[1]: Starting libvirt secret daemon socket...
Dec 13 07:26:43 compute-0 systemd[1]: Listening on libvirt secret daemon socket.

○ virtstoraged-admin.socket - libvirt storage daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-admin-sock (Stream)

○ virtstoraged-ro.socket - libvirt storage daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock-ro (Stream)

○ virtstoraged.socket - libvirt storage daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock (Stream)

● swap.swap - /swap
     Loaded: loaded (/etc/fstab; generated)
     Active: active since Sat 2025-12-13 07:09:16 UTC; 27min ago
      Until: Sat 2025-12-13 07:09:16 UTC; 27min ago
       What: /swap
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● basic.target - Basic System
     Loaded: loaded (/usr/lib/systemd/system/basic.target; static)
     Active: active since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd.special(7)

Dec 13 06:42:10 localhost systemd[1]: Reached target Basic System.

○ blockdev@dev-disk-by\x2duuid-cbdedf45\x2ded1d\x2d4952\x2d82a8\x2d33a12c0ba266.target - Block Device Preparation for /dev/disk/by-uuid/cbdedf45-ed1d-4952-82a8-33a12c0ba266
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ blockdev@dev-vda1.target - Block Device Preparation for /dev/vda1
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de.target - Ceph cluster 00fdae1b-7fad-5f1b-8734-ba4d9298a6de
     Loaded: loaded (/etc/systemd/system/ceph-00fdae1b-7fad-5f1b-8734-ba4d9298a6de.target; enabled; preset: disabled)
     Active: active since Sat 2025-12-13 07:13:20 UTC; 23min ago
      Until: Sat 2025-12-13 07:13:20 UTC; 23min ago

Dec 13 07:13:20 compute-0 systemd[1]: Reached target Ceph cluster 00fdae1b-7fad-5f1b-8734-ba4d9298a6de.

● ceph.target - All Ceph clusters and services
     Loaded: loaded (/etc/systemd/system/ceph.target; enabled; preset: disabled)
     Active: active since Sat 2025-12-13 07:13:20 UTC; 23min ago
      Until: Sat 2025-12-13 07:13:20 UTC; 23min ago

Dec 13 07:13:20 compute-0 systemd[1]: Reached target All Ceph clusters and services.

● cloud-config.target - Cloud-config availability
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.target; static)
     Active: active since Sat 2025-12-13 06:42:16 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:16 UTC; 54min ago

Dec 13 06:42:16 np0005558317 systemd[1]: Reached target Cloud-config availability.

● cloud-init.target - Cloud-init target
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.target; enabled-runtime; preset: disabled)
     Active: active since Sat 2025-12-13 06:42:17 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:17 UTC; 54min ago

Dec 13 06:42:17 np0005558317 systemd[1]: Reached target Cloud-init target.

○ cryptsetup-pre.target - Local Encrypted Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● cryptsetup.target - Local Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup.target; static)
     Active: active since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd.special(7)

● edpm_libvirt.target
     Loaded: loaded (/etc/systemd/system/edpm_libvirt.target; static)
     Active: active since Sat 2025-12-13 07:27:05 UTC; 9min ago
      Until: Sat 2025-12-13 07:27:05 UTC; 9min ago

Dec 13 07:27:05 compute-0 systemd[1]: Reached target edpm_libvirt.target.

○ emergency.target - Emergency Mode
     Loaded: loaded (/usr/lib/systemd/system/emergency.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ first-boot-complete.target - First Boot Complete
     Loaded: loaded (/usr/lib/systemd/system/first-boot-complete.target; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd.special(7)

Dec 13 06:42:10 localhost systemd[1]: First Boot Complete was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ getty-pre.target - Preparation for Logins
     Loaded: loaded (/usr/lib/systemd/system/getty-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

● getty.target - Login Prompts
     Loaded: loaded (/usr/lib/systemd/system/getty.target; static)
     Active: active since Sat 2025-12-13 06:42:16 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:16 UTC; 54min ago
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

Dec 13 06:42:16 np0005558317 systemd[1]: Reached target Login Prompts.

○ graphical.target - Graphical Interface
     Loaded: loaded (/usr/lib/systemd/system/graphical.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ initrd-fs.target - Initrd File Systems
     Loaded: loaded (/usr/lib/systemd/system/initrd-fs.target; static)
     Active: inactive (dead) since Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd.special(7)

Dec 13 06:42:09 localhost systemd[1]: Reached target Initrd File Systems.

○ initrd-root-device.target - Initrd Root Device
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-device.target; static)
     Active: inactive (dead) since Sat 2025-12-13 06:42:09 UTC; 54min ago
       Docs: man:systemd.special(7)

Dec 13 06:42:08 localhost systemd[1]: Reached target Initrd Root Device.
Dec 13 06:42:09 localhost systemd[1]: Stopped target Initrd Root Device.

○ initrd-root-fs.target - Initrd Root File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-fs.target; static)
     Active: inactive (dead) since Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd.special(7)

Dec 13 06:42:09 localhost systemd[1]: Reached target Initrd Root File System.

○ initrd-switch-root.target - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.target; static)
     Active: inactive (dead) since Sat 2025-12-13 06:42:10 UTC; 54min ago

Dec 13 06:42:09 localhost systemd[1]: Reached target Switch Root.

○ initrd-usr-fs.target - Initrd /usr File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-usr-fs.target; static)
     Active: inactive (dead) since Sat 2025-12-13 06:42:09 UTC; 54min ago
       Docs: man:systemd.special(7)

Dec 13 06:42:09 localhost systemd[1]: Stopped target Initrd /usr File System.
Notice: journal has been rotated since unit was started, output may be incomplete.

○ initrd.target - Initrd Default Target
     Loaded: loaded (/usr/lib/systemd/system/initrd.target; static)
     Active: inactive (dead) since Sat 2025-12-13 06:42:09 UTC; 54min ago
       Docs: man:systemd.special(7)

Dec 13 06:42:09 localhost systemd[1]: Reached target Initrd Default Target.
Dec 13 06:42:09 localhost systemd[1]: Stopped target Initrd Default Target.

● integritysetup.target - Local Integrity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/integritysetup.target; static)
     Active: active since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd.special(7)

● local-fs-pre.target - Preparation for Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs-pre.target; static)
     Active: active since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd.special(7)

Dec 13 06:42:10 localhost systemd[1]: Reached target Preparation for Local File Systems.

● local-fs.target - Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs.target; static)
     Active: active since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd.special(7)

Dec 13 06:42:10 localhost systemd[1]: Reached target Local File Systems.

● multi-user.target - Multi-User System
     Loaded: loaded (/usr/lib/systemd/system/multi-user.target; indirect; preset: disabled)
     Active: active since Sat 2025-12-13 06:42:16 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:16 UTC; 54min ago
       Docs: man:systemd.special(7)

Dec 13 06:42:16 np0005558317 systemd[1]: Reached target Multi-User System.

● network-online.target - Network is Online
     Loaded: loaded (/usr/lib/systemd/system/network-online.target; static)
     Active: active since Sat 2025-12-13 06:42:16 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:16 UTC; 54min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Dec 13 06:42:16 np0005558317 systemd[1]: Reached target Network is Online.

● network-pre.target - Preparation for Network
     Loaded: loaded (/usr/lib/systemd/system/network-pre.target; static)
     Active: active since Sat 2025-12-13 06:42:11 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:11 UTC; 54min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Dec 13 06:42:11 np0005558317 systemd[1]: Reached target Preparation for Network.

● network.target - Network
     Loaded: loaded (/usr/lib/systemd/system/network.target; static)
     Active: active since Sat 2025-12-13 06:42:11 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:11 UTC; 54min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Dec 13 06:42:11 np0005558317 systemd[1]: Reached target Network.

● nfs-client.target - NFS client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-client.target; enabled; preset: disabled)
     Active: active since Sat 2025-12-13 06:42:11 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:11 UTC; 54min ago

Dec 13 06:42:11 np0005558317 systemd[1]: Reached target NFS client services.

○ nss-lookup.target - Host and Network Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-lookup.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● nss-user-lookup.target - User and Group Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-user-lookup.target; static)
     Active: active since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd.special(7)

Dec 13 06:42:10 localhost systemd[1]: Reached target User and Group Name Lookups.

● paths.target - Path Units
     Loaded: loaded (/usr/lib/systemd/system/paths.target; static)
     Active: active since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd.special(7)

○ remote-cryptsetup.target - Remote Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-cryptsetup.target; disabled; preset: enabled)
     Active: inactive (dead) since Sat 2025-12-13 06:42:09 UTC; 54min ago
       Docs: man:systemd.special(7)

Dec 13 06:42:09 localhost systemd[1]: Reached target Remote Encrypted Volumes.
Dec 13 06:42:09 localhost systemd[1]: Stopped target Remote Encrypted Volumes.

● remote-fs-pre.target - Preparation for Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs-pre.target; static)
     Active: active since Sat 2025-12-13 06:42:11 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:11 UTC; 54min ago
       Docs: man:systemd.special(7)

Dec 13 06:42:11 np0005558317 systemd[1]: Reached target Preparation for Remote File Systems.

● remote-fs.target - Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs.target; enabled; preset: enabled)
     Active: active since Sat 2025-12-13 06:42:11 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:11 UTC; 54min ago
       Docs: man:systemd.special(7)

Dec 13 06:42:11 np0005558317 systemd[1]: Reached target Remote File Systems.

○ remote-veritysetup.target - Remote Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-veritysetup.target; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ rescue.target - Rescue Mode
     Loaded: loaded (/usr/lib/systemd/system/rescue.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● rpc_pipefs.target
     Loaded: loaded (/usr/lib/systemd/system/rpc_pipefs.target; static)
     Active: active since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago

● rpcbind.target - RPC Port Mapper
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.target; static)
     Active: active since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd.special(7)

○ shutdown.target - System Shutdown
     Loaded: loaded (/usr/lib/systemd/system/shutdown.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● slices.target - Slice Units
     Loaded: loaded (/usr/lib/systemd/system/slices.target; static)
     Active: active since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
Unit syslog.target could not be found.
       Docs: man:systemd.special(7)

● sockets.target - Socket Units
     Loaded: loaded (/usr/lib/systemd/system/sockets.target; static)
     Active: active since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd.special(7)

Dec 13 06:42:10 localhost systemd[1]: Reached target Socket Units.

● sshd-keygen.target
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen.target; static)
     Active: active since Sat 2025-12-13 07:25:43 UTC; 11min ago
      Until: Sat 2025-12-13 07:25:43 UTC; 11min ago

Dec 13 07:25:43 compute-0 systemd[1]: Reached target sshd-keygen.target.

● swap.target - Swaps
     Loaded: loaded (/usr/lib/systemd/system/swap.target; static)
     Active: active since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd.special(7)

● sysinit.target - System Initialization
     Loaded: loaded (/usr/lib/systemd/system/sysinit.target; static)
     Active: active since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd.special(7)

Dec 13 06:42:10 localhost systemd[1]: Reached target System Initialization.

● time-set.target - System Time Set
     Loaded: loaded (/usr/lib/systemd/system/time-set.target; static)
     Active: active since Sat 2025-12-13 07:13:21 UTC; 23min ago
      Until: Sat 2025-12-13 07:13:21 UTC; 23min ago
       Docs: man:systemd.special(7)

Dec 13 07:13:21 compute-0 systemd[1]: Reached target System Time Set.

● time-sync.target - System Time Synchronized
     Loaded: loaded (/usr/lib/systemd/system/time-sync.target; static)
     Active: active since Sat 2025-12-13 07:13:21 UTC; 23min ago
      Until: Sat 2025-12-13 07:13:21 UTC; 23min ago
       Docs: man:systemd.special(7)

Dec 13 07:13:21 compute-0 systemd[1]: Reached target System Time Synchronized.

● timers.target - Timer Units
     Loaded: loaded (/usr/lib/systemd/system/timers.target; static)
     Active: active since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd.special(7)

Dec 13 06:42:10 localhost systemd[1]: Reached target Timer Units.

○ umount.target - Unmount All Filesystems
     Loaded: loaded (/usr/lib/systemd/system/umount.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ veritysetup-pre.target - Local Verity Protected Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/veritysetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● veritysetup.target - Local Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/veritysetup.target; static)
     Active: active since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
       Docs: man:systemd.special(7)

○ virt-guest-shutdown.target - Libvirt guests shutdown
     Loaded: loaded (/etc/systemd/system/virt-guest-shutdown.target; static)
     Active: inactive (dead)
       Docs: https://libvirt.org

● 1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07-35b4a96ef7c0aecc.timer - /usr/bin/podman healthcheck run 1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07
     Loaded: loaded (/run/systemd/transient/1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07-35b4a96ef7c0aecc.timer; transient)
  Transient: yes
     Active: active (waiting) since Sat 2025-12-13 07:23:39 UTC; 13min ago
      Until: Sat 2025-12-13 07:23:39 UTC; 13min ago
    Trigger: Sat 2025-12-13 07:36:56 UTC; 5s left
   Triggers: ● 1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07-35b4a96ef7c0aecc.service

Dec 13 07:23:39 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 1929b765dac802841eb5d5f56597ea7bfd15768bcf514c3ef50eb60bf1b13d07.

● d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed-304b4c4e52c8cee3.timer - /usr/bin/podman healthcheck run d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed
     Loaded: loaded (/run/systemd/transient/d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed-304b4c4e52c8cee3.timer; transient)
  Transient: yes
     Active: active (waiting) since Sat 2025-12-13 07:22:49 UTC; 14min ago
      Until: Sat 2025-12-13 07:22:49 UTC; 14min ago
    Trigger: Sat 2025-12-13 07:37:07 UTC; 16s left
   Triggers: ● d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed-304b4c4e52c8cee3.service

Dec 13 07:22:49 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run d4b07d1867f144077f7c5c5cc5ae5c3e4d24058947898d6fee77240ec3efb8ed.

● dnf-makecache.timer - dnf makecache --timer
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.timer; enabled; preset: enabled)
     Active: active (waiting) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
    Trigger: Sat 2025-12-13 08:41:19 UTC; 1h 4min left
   Triggers: ● dnf-makecache.service

Dec 13 06:42:10 localhost systemd[1]: Started dnf makecache --timer.

● f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6-407c9b11ead522c2.timer - /usr/bin/podman healthcheck run f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6
     Loaded: loaded (/run/systemd/transient/f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6-407c9b11ead522c2.timer; transient)
  Transient: yes
     Active: active (waiting) since Sat 2025-12-13 07:28:04 UTC; 8min ago
      Until: Sat 2025-12-13 07:28:04 UTC; 8min ago
    Trigger: Sat 2025-12-13 07:37:18 UTC; 27s left
   Triggers: ● f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6-407c9b11ead522c2.service

Dec 13 07:28:04 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run f696b337a701eeb12548640e55e827503e894b0e602e4d8080c3212ba22210e6.

● logrotate.timer - Daily rotation of log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.timer; enabled; preset: enabled)
     Active: active (waiting) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
    Trigger: Sun 2025-12-14 00:00:00 UTC; 16h left
   Triggers: ● logrotate.service
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

Dec 13 06:42:10 localhost systemd[1]: Started Daily rotation of log files.

○ raid-check.timer - Weekly RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.timer; enabled; preset: enabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● raid-check.service

○ sysstat-collect.timer - Run system activity accounting tool every 10 minutes
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-collect.service

○ sysstat-summary.timer - Generate summary of yesterday's process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-summary.service

● systemd-tmpfiles-clean.timer - Daily Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.timer; static)
     Active: active (waiting) since Sat 2025-12-13 06:42:10 UTC; 54min ago
      Until: Sat 2025-12-13 06:42:10 UTC; 54min ago
    Trigger: Sun 2025-12-14 06:57:37 UTC; 23h left
   Triggers: ● systemd-tmpfiles-clean.service
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)

Dec 13 06:42:10 localhost systemd[1]: Started Daily Cleanup of Temporary Directories.

● unbound-anchor.timer - daily update of the root trust anchor for DNSSEC
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.timer; enabled; preset: enabled)
     Active: active (waiting) since Sat 2025-12-13 07:09:44 UTC; 27min ago
      Until: Sat 2025-12-13 07:09:44 UTC; 27min ago
    Trigger: Sun 2025-12-14 00:00:00 UTC; 16h left
   Triggers: ● unbound-anchor.service
       Docs: man:unbound-anchor(8)

Dec 13 07:09:44 compute-0 systemd[1]: Started daily update of the root trust anchor for DNSSEC.
