● compute-0
    State: running
    Units: 476 loaded (incl. loaded aliases)
     Jobs: 0 queued
   Failed: 0 units
    Since: Sat 2025-11-29 06:29:41 UTC; 1h 47min ago
  systemd: 252-59.el9
   CGroup: /
           ├─318245 turbostat --debug sleep 10
           ├─318248 sleep 10
           ├─init.scope
           │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
           ├─machine.slice
           │ ├─libpod-23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8.scope
           │ │ └─container
           │ │   ├─153385 dumb-init --single-child -- kolla_start
           │ │   └─153388 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
           │ ├─libpod-53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f.scope
           │ │ └─container
           │ │   ├─238043 dumb-init --single-child -- kolla_start
           │ │   └─238046 /usr/sbin/multipathd -d
           │ ├─libpod-8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e.scope
           │ │ └─container
           │ │   ├─163635 dumb-init --single-child -- kolla_start
           │ │   ├─163655 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │ │   ├─164115 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │ │   ├─164178 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpthalsdm8/privsep.sock
           │ │   ├─266092 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpv5pj6f28/privsep.sock
           │ │   └─266358 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmp_ylguf3k/privsep.sock
           │ └─libpod-c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809.scope
           │   └─container
           │     ├─256731 dumb-init --single-child -- kolla_start
           │     ├─256736 /usr/bin/python3 /usr/bin/nova-compute
           │     ├─265746 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmp4yqe7f_k/privsep.sock
           │     ├─266745 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmp2b17b584/privsep.sock
           │     └─266829 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmparx_pv8r/privsep.sock
           ├─system.slice
           │ ├─NetworkManager.service
           │ │ └─48962 /usr/sbin/NetworkManager --no-daemon
           │ ├─auditd.service
           │ │ ├─704 /sbin/auditd
           │ │ └─706 /usr/sbin/sedispatch
           │ ├─chronyd.service
           │ │ └─58530 /usr/sbin/chronyd -F 2
           │ ├─crond.service
           │ │ └─1011 /usr/sbin/crond -n
           │ ├─dbus-broker.service
           │ │ ├─774 /usr/bin/dbus-broker-launch --scope system --audit
           │ │ └─781 dbus-broker --log 4 --controller 9 --machine-id 1f988c78c563e12389ab342aced42dbb --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
           │ ├─edpm_multipathd.service
           │ │ └─238041 /usr/bin/conmon --api-version 1 -c 53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f -u 53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f/userdata -p /run/containers/storage/overlay-containers/53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f/userdata/pidfile -n multipathd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f/userdata/oci-log --conmon-pidfile /run/multipathd.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f
           │ ├─edpm_nova_compute.service
           │ │ └─256729 /usr/bin/conmon --api-version 1 -c c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809 -u c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809/userdata -p /run/containers/storage/overlay-containers/c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809
           │ ├─edpm_ovn_controller.service
           │ │ └─153383 /usr/bin/conmon --api-version 1 -c 23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8 -u 23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8/userdata -p /run/containers/storage/overlay-containers/23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8
           │ ├─edpm_ovn_metadata_agent.service
           │ │ └─163632 /usr/bin/conmon --api-version 1 -c 8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e -u 8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e/userdata -p /run/containers/storage/overlay-containers/8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e
           │ ├─gssproxy.service
           │ │ └─875 /usr/sbin/gssproxy -D
           │ ├─irqbalance.service
           │ │ └─804 /usr/sbin/irqbalance
           │ ├─iscsid.service
           │ │ └─240752 /usr/sbin/iscsid -f
           │ ├─ovs-vswitchd.service
           │ │ └─47265 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
           │ ├─ovsdb-server.service
           │ │ └─47183 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
           │ ├─polkit.service
           │ │ └─43449 /usr/lib/polkit-1/polkitd --no-debug
           │ ├─rpcbind.service
           │ │ └─702 /usr/bin/rpcbind -w -f
           │ ├─rsyslog.service
           │ │ └─1007 /usr/sbin/rsyslogd -n
           │ ├─sshd.service
           │ │ └─191978 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
           │ ├─system-ceph\x2d14ff1f30\x2d5059\x2d58f1\x2d9a23\x2d69871bb275a1.slice
           │ │ ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@crash.compute-0.service
           │ │ │ ├─libpod-payload-7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230
           │ │ │ │ ├─82993 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
           │ │ │ │ └─82995 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
           │ │ │ └─runtime
           │ │ │   └─82991 /usr/bin/conmon --api-version 1 -c 7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230 -u 7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230/userdata -p /run/containers/storage/overlay-containers/7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230
           │ │ ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mds.cephfs.compute-0.bdhrqf.service
           │ │ │ ├─libpod-payload-f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511
           │ │ │ │ ├─102314 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.bdhrqf -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─102316 /usr/bin/ceph-mds -n mds.cephfs.compute-0.bdhrqf -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─102312 /usr/bin/conmon --api-version 1 -c f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511 -u f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511/userdata -p /run/containers/storage/overlay-containers/f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-mds-cephfs-compute-0-bdhrqf --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mds.cephfs.compute-0.bdhrqf.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511
           │ │ ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mgr.compute-0.kzdpag.service
           │ │ │ ├─libpod-payload-cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea
           │ │ │ │ ├─75343 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.kzdpag -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─75345 /usr/bin/ceph-mgr -n mgr.compute-0.kzdpag -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─75341 /usr/bin/conmon --api-version 1 -c cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea -u cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea/userdata -p /run/containers/storage/overlay-containers/cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-mgr-compute-0-kzdpag --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mgr.compute-0.kzdpag.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea
           │ │ ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mon.compute-0.service
           │ │ │ ├─libpod-payload-21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90
           │ │ │ │ ├─75048 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ │ └─75050 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─75046 /usr/bin/conmon --api-version 1 -c 21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90 -u 21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90/userdata -p /run/containers/storage/overlay-containers/21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90
           │ │ ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.0.service
           │ │ │ ├─libpod-payload-9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96
           │ │ │ │ ├─88829 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─88831 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─88827 /usr/bin/conmon --api-version 1 -c 9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96 -u 9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96/userdata -p /run/containers/storage/overlay-containers/9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96
           │ │ ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.1.service
           │ │ │ ├─libpod-payload-6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11
           │ │ │ │ ├─89838 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─89840 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─89836 /usr/bin/conmon --api-version 1 -c 6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11 -u 6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11/userdata -p /run/containers/storage/overlay-containers/6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11
           │ │ ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.2.service
           │ │ │ ├─libpod-payload-2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23
           │ │ │ │ ├─91078 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─91083 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─91076 /usr/bin/conmon --api-version 1 -c 2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23 -u 2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23/userdata -p /run/containers/storage/overlay-containers/2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23
           │ │ └─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@rgw.rgw.compute-0.qxekyl.service
           │ │   ├─libpod-payload-4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf
           │ │   │ ├─99621 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.qxekyl -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │   │ └─99623 /usr/bin/radosgw -n client.rgw.rgw.compute-0.qxekyl -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │   └─runtime
           │ │     └─99619 /usr/bin/conmon --api-version 1 -c 4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf -u 4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf/userdata -p /run/containers/storage/overlay-containers/4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-rgw-rgw-compute-0-qxekyl --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@rgw.rgw.compute-0.qxekyl.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf
           │ ├─system-getty.slice
           │ │ └─getty@tty1.service
           │ │   └─1013 /sbin/agetty -o "-p -- \\u" --noclear - linux
           │ ├─system-serial\x2dgetty.slice
           │ │ └─serial-getty@ttyS0.service
           │ │   └─1014 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
           │ ├─systemd-hostnamed.service
           │ │ └─314855 /usr/lib/systemd/systemd-hostnamed
           │ ├─systemd-journald.service
           │ │ └─681 /usr/lib/systemd/systemd-journald
           │ ├─systemd-logind.service
           │ │ └─807 /usr/lib/systemd/systemd-logind
           │ ├─systemd-machined.service
           │ │ └─217781 /usr/lib/systemd/systemd-machined
           │ ├─systemd-udevd.service
           │ │ └─udev
           │ │   └─733 /usr/lib/systemd/systemd-udevd
           │ ├─tuned.service
           │ │ └─113121 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
           │ ├─virtlogd.service
           │ │ └─217150 /usr/sbin/virtlogd
           │ ├─virtnodedevd.service
           │ │ └─256467 /usr/sbin/virtnodedevd --timeout 120
           │ ├─virtqemud.service
           │ │ └─256259 /usr/sbin/virtqemud --timeout 120
           │ └─virtsecretd.service
           │   └─266034 /usr/sbin/virtsecretd --timeout 120
           └─user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4513 /usr/bin/python3
             │ ├─session-51.scope
             │ │ ├─311291 "sshd-session: zuul [priv]"
             │ │ ├─311294 "sshd-session: zuul@notty"
             │ │ ├─311295 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─311319 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─318244 timeout 15s turbostat --debug sleep 10
             │ │ ├─318920 timeout 300s systemctl status --all
             │ │ ├─318922 systemctl status --all
             │ │ └─318923 timeout --foreground 300s virsh -r nodedev-dumpxml block_vda
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─14284 /usr/bin/dbus-broker-launch --scope user
             │   │   └─14294 dbus-broker --log 4 --controller 9 --machine-id 1f988c78c563e12389ab342aced42dbb --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4301 /usr/lib/systemd/systemd --user
             │   │ └─4303 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-aee1f4af.scope
             │       └─14221 catatonit -P
             └─user-42477.slice
               ├─session-20.scope
               │ ├─76613 "sshd-session: ceph-admin [priv]"
               │ └─76635 "sshd-session: ceph-admin"
               ├─session-22.scope
               │ ├─76620 "sshd-session: ceph-admin [priv]"
               │ └─76636 "sshd-session: ceph-admin@notty"
               ├─session-23.scope
               │ ├─76687 "sshd-session: ceph-admin [priv]"
                │ └─76690 "sshd-session: ceph-admin@notty"
Unit boot.automount could not be found.
               ├─session-24.scope
               │ ├─76741 "sshd-session: ceph-admin [priv]"
               │ └─76744 "sshd-session: ceph-admin@notty"
               ├─session-25.scope
               │ ├─76795 "sshd-session: ceph-admin [priv]"
               │ └─76798 "sshd-session: ceph-admin@notty"
               ├─session-26.scope
               │ ├─76849 "sshd-session: ceph-admin [priv]"
               │ └─76852 "sshd-session: ceph-admin@notty"
               ├─session-27.scope
               │ ├─76903 "sshd-session: ceph-admin [priv]"
               │ └─76906 "sshd-session: ceph-admin@notty"
               ├─session-28.scope
               │ ├─76957 "sshd-session: ceph-admin [priv]"
               │ └─76960 "sshd-session: ceph-admin@notty"
               ├─session-29.scope
               │ ├─77011 "sshd-session: ceph-admin [priv]"
               │ └─77014 "sshd-session: ceph-admin@notty"
               ├─session-30.scope
               │ ├─77065 "sshd-session: ceph-admin [priv]"
               │ └─77068 "sshd-session: ceph-admin@notty"
               ├─session-31.scope
               │ ├─77092 "sshd-session: ceph-admin [priv]"
               │ └─77095 "sshd-session: ceph-admin@notty"
               ├─session-32.scope
               │ ├─77146 "sshd-session: ceph-admin [priv]"
               │ └─77149 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─76617 /usr/lib/systemd/systemd --user
                   └─76619 "(sd-pam)"

● proc-sys-fs-binfmt_misc.automount - Arbitrary Executable File Formats File System Automount Point
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.automount; static)
     Active: active (running) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
   Triggers: ● proc-sys-fs-binfmt_misc.mount
      Where: /proc/sys/fs/binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Nov 29 07:12:21 compute-0 systemd[1]: proc-sys-fs-binfmt_misc.automount: Got automount request for /proc/sys/fs/binfmt_misc, triggered by 78078 (sysctl)

● dev-cdrom.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ceph_vg0-ceph_lv0.device - /dev/ceph_vg0/ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:28 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:28 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-ceph_vg1-ceph_lv1.device - /dev/ceph_vg1/ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:33 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:33 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-ceph_vg2-ceph_lv2.device - /dev/ceph_vg2/ceph_lv2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:38 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:38 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2ddiskseq-1.device - /dev/disk/by-diskseq/1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2ddiskseq-11.device - /dev/disk/by-diskseq/11
    Follows: unit currently follows state of sys-devices-virtual-block-loop5.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:37 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:37 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/loop5

● dev-disk-by\x2ddiskseq-12.device - /dev/disk/by-diskseq/12
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:38 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:38 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2ddiskseq-3.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2ddiskseq-5.device - /dev/disk/by-diskseq/5
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:28 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:28 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2ddiskseq-6.device - /dev/disk/by-diskseq/6
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:28 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:28 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2ddiskseq-8.device - /dev/disk/by-diskseq/8
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:32 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:32 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/loop4

● dev-disk-by\x2ddiskseq-9.device - /dev/disk/by-diskseq/9
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:33 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:33 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-ata\x2dQEMU_DVD\x2dROM_QM00001.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg0\x2dceph_lv0.device - /dev/disk/by-id/dm-name-ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:28 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:28 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg1\x2dceph_lv1.device - /dev/disk/by-id/dm-name-ceph_vg1-ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:33 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:33 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg2\x2dceph_lv2.device - /dev/disk/by-id/dm-name-ceph_vg2-ceph_lv2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:38 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:38 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2d5mKDzfN81CKLmVuC7sZRxXEDnxbqe7TKMm22d7ePBSXWUtPCqd9E3ea6BfdSpmt6.device - /dev/disk/by-id/dm-uuid-LVM-5mKDzfN81CKLmVuC7sZRxXEDnxbqe7TKMm22d7ePBSXWUtPCqd9E3ea6BfdSpmt6
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:38 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:38 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2dXlGv7forWV30Gyff6MYiVOjSF0Hqfs53fUsuEra2xBK4DKtC82L0OMbFgNJPh5ox.device - /dev/disk/by-id/dm-uuid-LVM-XlGv7forWV30Gyff6MYiVOjSF0Hqfs53fUsuEra2xBK4DKtC82L0OMbFgNJPh5ox
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:33 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:33 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2dYxoxXAAGFRKahWC9rJ3Njdqv9ndleS4WQku2cVoQWxaCxs1V8BJ1wV5EWgZJy3Ew.device - /dev/disk/by-id/dm-uuid-LVM-YxoxXAAGFRKahWC9rJ3Njdqv9ndleS4WQku2cVoQWxaCxs1V8BJ1wV5EWgZJy3Ew
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:28 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:28 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2d3fQuDS\x2ddDFg\x2dIO8E\x2dvROU\x2dvdZt\x2dsaDZ\x2dFtDRt4.device - /dev/disk/by-id/lvm-pv-uuid-3fQuDS-dDFg-IO8E-vROU-vdZt-saDZ-FtDRt4
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:28 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:28 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dyWfFQh\x2dpMna\x2dKMQc\x2dpPAf\x2dR4rK\x2d5avK\x2dyxu6de.device - /dev/disk/by-id/lvm-pv-uuid-yWfFQh-pMna-KMQc-pPAf-R4rK-5avK-yxu6de
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:33 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:33 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/loop4

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dzRFzpu\x2dSkBk\x2d3pco\x2dasdr\x2d0jYL\x2dhuzO\x2d4VywDf.device - /dev/disk/by-id/lvm-pv-uuid-zRFzpu-SkBk-3pco-asdr-0jYL-huzO-4VywDf
    Follows: unit currently follows state of sys-devices-virtual-block-loop5.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:37 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:37 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/loop5

● dev-disk-by\x2dlabel-config\x2d2.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpartuuid-5a328d8c\x2d01.device - /dev/disk/by-partuuid/5a328d8c-01
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0.device - /dev/disk/by-path/pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0.device - /dev/disk/by-path/virtio-pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/virtio-pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2duuid-2025\x2d11\x2d29\x2d06\x2d27\x2d47\x2d00.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2duuid-b277050f\x2d8ace\x2d464d\x2dabb6\x2d4c46d4c45253.device - /dev/disk/by-uuid/b277050f-8ace-464d-abb6-4c46d4c45253
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

Nov 29 06:28:08 localhost systemd[1]: Found device /dev/disk/by-uuid/b277050f-8ace-464d-abb6-4c46d4c45253.

● dev-dm\x2d0.device - /dev/dm-0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:28 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:28 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-dm\x2d1.device - /dev/dm-1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:33 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:33 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-dm\x2d2.device - /dev/dm-2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:38 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:38 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-loop3.device - /dev/loop3
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:28 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:28 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/loop3

● dev-loop4.device - /dev/loop4
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:32 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:32 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/loop4

● dev-loop5.device - /dev/loop5
    Follows: unit currently follows state of sys-devices-virtual-block-loop5.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:37 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:37 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/loop5

● dev-mapper-ceph_vg0\x2dceph_lv0.device - /dev/mapper/ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:28 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:28 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-mapper-ceph_vg1\x2dceph_lv1.device - /dev/mapper/ceph_vg1-ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:33 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:33 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-mapper-ceph_vg2\x2dceph_lv2.device - /dev/mapper/ceph_vg2-ceph_lv2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:38 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:38 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-rfkill.device - /dev/rfkill
    Follows: unit currently follows state of sys-devices-virtual-misc-rfkill.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:45 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:45 UTC; 1h 47min ago
     Device: /sys/devices/virtual/misc/rfkill

● dev-sr0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ttyS0.device - /dev/ttyS0
    Follows: unit currently follows state of sys-devices-pnp0-00:00-tty-ttyS0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

Nov 29 06:29:43 localhost systemd[1]: Condition check resulted in /dev/ttyS0 being skipped.

● dev-ttyS1.device - /dev/ttyS1
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● dev-ttyS2.device - /dev/ttyS2
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS2.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● dev-ttyS3.device - /dev/ttyS3
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS3.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● dev-vda.device - /dev/vda
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-vda1.device - /dev/vda1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device - QEMU_DVD-ROM config-2
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● sys-devices-pci0000:00-0000:00:03.0-virtio1-net-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● sys-devices-pci0000:00-0000:00:07.0-virtio5-net-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:31:56 UTC; 1h 45min ago
      Until: Sat 2025-11-29 06:31:56 UTC; 1h 45min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-devices-platform-serial8250-tty-ttyS1.device - /sys/devices/platform/serial8250/tty/ttyS1
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● sys-devices-platform-serial8250-tty-ttyS2.device - /sys/devices/platform/serial8250/tty/ttyS2
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● sys-devices-platform-serial8250-tty-ttyS3.device - /sys/devices/platform/serial8250/tty/ttyS3
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● sys-devices-pnp0-00:00-tty-ttyS0.device - /sys/devices/pnp0/00:00/tty/ttyS0
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

● sys-devices-virtual-block-dm\x2d0.device - /sys/devices/virtual/block/dm-0
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:28 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:28 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/dm-0

● sys-devices-virtual-block-dm\x2d1.device - /sys/devices/virtual/block/dm-1
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:33 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:33 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/dm-1

● sys-devices-virtual-block-dm\x2d2.device - /sys/devices/virtual/block/dm-2
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:38 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:38 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/dm-2

● sys-devices-virtual-block-loop3.device - /sys/devices/virtual/block/loop3
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:28 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:28 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/loop3

● sys-devices-virtual-block-loop4.device - /sys/devices/virtual/block/loop4
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:32 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:32 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/loop4

● sys-devices-virtual-block-loop5.device - /sys/devices/virtual/block/loop5
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:10:37 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:10:37 UTC; 1h 6min ago
     Device: /sys/devices/virtual/block/loop5

● sys-devices-virtual-misc-rfkill.device - /sys/devices/virtual/misc/rfkill
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:45 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:45 UTC; 1h 47min ago
     Device: /sys/devices/virtual/misc/rfkill

● sys-devices-virtual-net-br\x2dex.device - /sys/devices/virtual/net/br-ex
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:06:55 UTC; 1h 10min ago
      Until: Sat 2025-11-29 07:06:55 UTC; 1h 10min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-devices-virtual-net-br\x2dint.device - /sys/devices/virtual/net/br-int
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:24:14 UTC; 53min ago
      Until: Sat 2025-11-29 07:24:14 UTC; 53min ago
     Device: /sys/devices/virtual/net/br-int

● sys-devices-virtual-net-genev_sys_6081.device - /sys/devices/virtual/net/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:24:15 UTC; 53min ago
      Until: Sat 2025-11-29 07:24:15 UTC; 53min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-devices-virtual-net-ovs\x2dsystem.device - /sys/devices/virtual/net/ovs-system
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:06:55 UTC; 1h 10min ago
      Until: Sat 2025-11-29 07:06:55 UTC; 1h 10min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-devices-virtual-net-vlan20.device - /sys/devices/virtual/net/vlan20
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:06:55 UTC; 1h 10min ago
      Until: Sat 2025-11-29 07:06:55 UTC; 1h 10min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-devices-virtual-net-vlan21.device - /sys/devices/virtual/net/vlan21
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:06:55 UTC; 1h 10min ago
      Until: Sat 2025-11-29 07:06:55 UTC; 1h 10min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-devices-virtual-net-vlan22.device - /sys/devices/virtual/net/vlan22
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:06:55 UTC; 1h 10min ago
      Until: Sat 2025-11-29 07:06:55 UTC; 1h 10min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-devices-virtual-net-vlan23.device - /sys/devices/virtual/net/vlan23
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:06:55 UTC; 1h 10min ago
      Until: Sat 2025-11-29 07:06:55 UTC; 1h 10min ago
Unit boot.mount could not be found.
     Device: /sys/devices/virtual/net/vlan23

● sys-module-configfs.device - /sys/module/configfs
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
     Device: /sys/module/configfs

● sys-module-fuse.device - /sys/module/fuse
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
     Device: /sys/module/fuse

● sys-subsystem-net-devices-br\x2dex.device - /sys/subsystem/net/devices/br-ex
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:06:55 UTC; 1h 10min ago
      Until: Sat 2025-11-29 07:06:55 UTC; 1h 10min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-subsystem-net-devices-br\x2dint.device - /sys/subsystem/net/devices/br-int
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:24:14 UTC; 53min ago
      Until: Sat 2025-11-29 07:24:14 UTC; 53min ago
     Device: /sys/devices/virtual/net/br-int

● sys-subsystem-net-devices-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-subsystem-net-devices-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 06:31:56 UTC; 1h 45min ago
      Until: Sat 2025-11-29 06:31:56 UTC; 1h 45min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-subsystem-net-devices-genev_sys_6081.device - /sys/subsystem/net/devices/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:24:15 UTC; 53min ago
      Until: Sat 2025-11-29 07:24:15 UTC; 53min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-subsystem-net-devices-ovs\x2dsystem.device - /sys/subsystem/net/devices/ovs-system
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:06:55 UTC; 1h 10min ago
      Until: Sat 2025-11-29 07:06:55 UTC; 1h 10min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-subsystem-net-devices-vlan20.device - /sys/subsystem/net/devices/vlan20
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:06:55 UTC; 1h 10min ago
      Until: Sat 2025-11-29 07:06:55 UTC; 1h 10min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-subsystem-net-devices-vlan21.device - /sys/subsystem/net/devices/vlan21
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:06:55 UTC; 1h 10min ago
      Until: Sat 2025-11-29 07:06:55 UTC; 1h 10min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-subsystem-net-devices-vlan22.device - /sys/subsystem/net/devices/vlan22
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:06:55 UTC; 1h 10min ago
      Until: Sat 2025-11-29 07:06:55 UTC; 1h 10min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-subsystem-net-devices-vlan23.device - /sys/subsystem/net/devices/vlan23
     Loaded: loaded
     Active: active (plugged) since Sat 2025-11-29 07:06:55 UTC; 1h 10min ago
      Until: Sat 2025-11-29 07:06:55 UTC; 1h 10min ago
     Device: /sys/devices/virtual/net/vlan23

● -.mount - Root Mount
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted)
      Where: /
       What: /dev/vda1
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages.mount - Huge Pages File System
     Loaded: loaded (/usr/lib/systemd/system/dev-hugepages.mount; static)
     Active: active (mounted) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Where: /dev/hugepages
       What: hugetlbfs
       Docs: https://docs.kernel.org/admin-guide/mm/hugetlbpage.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 52.0K (peak: 556.0K)
Unit home.mount could not be found.
        CPU: 8ms
     CGroup: /dev-hugepages.mount

● dev-hugepages1G.mount - /dev/hugepages1G
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Sat 2025-11-29 07:09:22 UTC; 1h 7min ago
      Until: Sat 2025-11-29 07:09:22 UTC; 1h 7min ago
      Where: /dev/hugepages1G
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages2M.mount - /dev/hugepages2M
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Sat 2025-11-29 07:09:22 UTC; 1h 7min ago
      Until: Sat 2025-11-29 07:09:22 UTC; 1h 7min ago
      Where: /dev/hugepages2M
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-mqueue.mount - POSIX Message Queue File System
     Loaded: loaded (/usr/lib/systemd/system/dev-mqueue.mount; static)
     Active: active (mounted) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Where: /dev/mqueue
       What: mqueue
       Docs: man:mq_overview(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 4.0K (peak: 540.0K)
        CPU: 5ms
     CGroup: /dev-mqueue.mount

○ proc-fs-nfsd.mount - NFSD configuration filesystem
     Loaded: loaded (/usr/lib/systemd/system/proc-fs-nfsd.mount; static)
     Active: inactive (dead)
      Where: /proc/fs/nfsd
       What: nfsd

● proc-sys-fs-binfmt_misc.mount - Arbitrary Executable File Formats File System
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.mount; disabled; preset: disabled)
     Active: active (mounted) since Sat 2025-11-29 07:12:21 UTC; 1h 4min ago
      Until: Sat 2025-11-29 07:12:21 UTC; 1h 4min ago
TriggeredBy: ● proc-sys-fs-binfmt_misc.automount
      Where: /proc/sys/fs/binfmt_misc
       What: binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 8.0K (peak: 552.0K)
        CPU: 5ms
     CGroup: /proc-sys-fs-binfmt_misc.mount

Nov 29 07:12:21 compute-0 systemd[1]: Mounting Arbitrary Executable File Formats File System...
Nov 29 07:12:21 compute-0 systemd[1]: Mounted Arbitrary Executable File Formats File System.

● run-credentials-systemd\x2dsysctl.service.mount - /run/credentials/systemd-sysctl.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 07:05:20 UTC; 1h 11min ago
      Until: Sat 2025-11-29 07:05:20 UTC; 1h 11min ago
      Where: /run/credentials/systemd-sysctl.service
       What: none

● run-credentials-systemd\x2dsysusers.service.mount - /run/credentials/systemd-sysusers.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Where: /run/credentials/systemd-sysusers.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup.service.mount - /run/credentials/systemd-tmpfiles-setup.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
      Where: /run/credentials/systemd-tmpfiles-setup.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup\x2ddev.service.mount - /run/credentials/systemd-tmpfiles-setup-dev.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
      Where: /run/credentials/systemd-tmpfiles-setup-dev.service
       What: none

● run-netns.mount - /run/netns
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 07:08:48 UTC; 1h 8min ago
      Until: Sat 2025-11-29 07:08:48 UTC; 1h 8min ago
      Where: /run/netns
Unit sysroot.mount could not be found.
       What: tmpfs

● run-user-1000.mount - /run/user/1000
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 06:30:09 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:30:09 UTC; 1h 47min ago
      Where: /run/user/1000
       What: tmpfs

● run-user-42477.mount - /run/user/42477
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 07:12:12 UTC; 1h 5min ago
      Until: Sat 2025-11-29 07:12:12 UTC; 1h 5min ago
      Where: /run/user/42477
       What: tmpfs

● sys-fs-fuse-connections.mount - FUSE Control File System
     Loaded: loaded (/usr/lib/systemd/system/sys-fs-fuse-connections.mount; static)
     Active: active (mounted) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Where: /sys/fs/fuse/connections
       What: fusectl
       Docs: https://docs.kernel.org/filesystems/fuse.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 8.0K (peak: 540.0K)
        CPU: 4ms
     CGroup: /sys-fs-fuse-connections.mount

Nov 29 06:29:42 localhost systemd[1]: Mounting FUSE Control File System...
Nov 29 06:29:42 localhost systemd[1]: Mounted FUSE Control File System.

● sys-kernel-config.mount - Kernel Configuration File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Where: /sys/kernel/config
       What: configfs
       Docs: https://docs.kernel.org/filesystems/configfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● sys-kernel-debug-tracing.mount - /sys/kernel/debug/tracing
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 08:16:35 UTC; 41s ago
      Until: Sat 2025-11-29 08:16:35 UTC; 41s ago
      Where: /sys/kernel/debug/tracing
       What: tracefs

● sys-kernel-debug.mount - Kernel Debug File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-debug.mount; static)
     Active: active (mounted) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Where: /sys/kernel/debug
       What: debugfs
       Docs: https://docs.kernel.org/filesystems/debugfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 4.0K (peak: 540.0K)
        CPU: 2ms
     CGroup: /sys-kernel-debug.mount

● sys-kernel-tracing.mount - Kernel Trace File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-tracing.mount; static)
     Active: active (mounted) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Where: /sys/kernel/tracing
       What: tracefs
       Docs: https://docs.kernel.org/trace/ftrace.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 4.0K (peak: 540.0K)
        CPU: 2ms
     CGroup: /sys-kernel-tracing.mount

○ tmp.mount - Temporary Directory /tmp
     Loaded: loaded (/usr/lib/systemd/system/tmp.mount; disabled; preset: disabled)
     Active: inactive (dead)
      Where: /tmp
       What: tmpfs
       Docs: https://systemd.io/TEMPORARY_DIRECTORIES
             man:file-hierarchy(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● var-lib-containers-storage-overlay-108fb7d8e83c79594c0af0d1c5ac8c5ce218583bbb13c906515f4c93ccb0c7f7-merged.mount - /var/lib/containers/storage/overlay/108fb7d8e83c79594c0af0d1c5ac8c5ce218583bbb13c906515f4c93ccb0c7f7/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 07:35:57 UTC; 41min ago
      Until: Sat 2025-11-29 07:35:57 UTC; 41min ago
      Where: /var/lib/containers/storage/overlay/108fb7d8e83c79594c0af0d1c5ac8c5ce218583bbb13c906515f4c93ccb0c7f7/merged
       What: overlay

● var-lib-containers-storage-overlay-1bfdfdf558c247ee2513ed0119bd7c3a2b7280136a4dd1b92c24543f63b52341-merged.mount - /var/lib/containers/storage/overlay/1bfdfdf558c247ee2513ed0119bd7c3a2b7280136a4dd1b92c24543f63b52341/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 07:14:10 UTC; 1h 3min ago
      Until: Sat 2025-11-29 07:14:10 UTC; 1h 3min ago
      Where: /var/lib/containers/storage/overlay/1bfdfdf558c247ee2513ed0119bd7c3a2b7280136a4dd1b92c24543f63b52341/merged
       What: overlay

● var-lib-containers-storage-overlay-26060044e6252263c619e6ad11e9a200bdd3c1256baca568245a9205489f9dc2-merged.mount - /var/lib/containers/storage/overlay/26060044e6252263c619e6ad11e9a200bdd3c1256baca568245a9205489f9dc2/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 07:11:17 UTC; 1h 5min ago
      Until: Sat 2025-11-29 07:11:17 UTC; 1h 5min ago
      Where: /var/lib/containers/storage/overlay/26060044e6252263c619e6ad11e9a200bdd3c1256baca568245a9205489f9dc2/merged
       What: overlay

● var-lib-containers-storage-overlay-26c805e15d91b111068e49a7bce3fc58c3aae073126c7e54dd0878b194a7bd75-merged.mount - /var/lib/containers/storage/overlay/26c805e15d91b111068e49a7bce3fc58c3aae073126c7e54dd0878b194a7bd75/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 07:13:14 UTC; 1h 4min ago
      Until: Sat 2025-11-29 07:13:14 UTC; 1h 4min ago
      Where: /var/lib/containers/storage/overlay/26c805e15d91b111068e49a7bce3fc58c3aae073126c7e54dd0878b194a7bd75/merged
       What: overlay

● var-lib-containers-storage-overlay-39b240e9e861c4101e042d2e4e8719ec006fe754e1814911523c67492f39bb67-merged.mount - /var/lib/containers/storage/overlay/39b240e9e861c4101e042d2e4e8719ec006fe754e1814911523c67492f39bb67/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 07:38:36 UTC; 38min ago
      Until: Sat 2025-11-29 07:38:36 UTC; 38min ago
      Where: /var/lib/containers/storage/overlay/39b240e9e861c4101e042d2e4e8719ec006fe754e1814911523c67492f39bb67/merged
       What: overlay

● var-lib-containers-storage-overlay-3e95e9e3a2d7f55007da2144b93d330265e6d420a89b2b432a886c9865c6cd8f-merged.mount - /var/lib/containers/storage/overlay/3e95e9e3a2d7f55007da2144b93d330265e6d420a89b2b432a886c9865c6cd8f/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 07:14:30 UTC; 1h 2min ago
      Until: Sat 2025-11-29 07:14:30 UTC; 1h 2min ago
      Where: /var/lib/containers/storage/overlay/3e95e9e3a2d7f55007da2144b93d330265e6d420a89b2b432a886c9865c6cd8f/merged
       What: overlay

● var-lib-containers-storage-overlay-7a0ea265aaf7308d2de4c498e01cbd6300233f36116fd8dc5825673ac27ac04e-merged.mount - /var/lib/containers/storage/overlay/7a0ea265aaf7308d2de4c498e01cbd6300233f36116fd8dc5825673ac27ac04e/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 07:25:57 UTC; 51min ago
      Until: Sat 2025-11-29 07:25:57 UTC; 51min ago
      Where: /var/lib/containers/storage/overlay/7a0ea265aaf7308d2de4c498e01cbd6300233f36116fd8dc5825673ac27ac04e/merged
       What: overlay

● var-lib-containers-storage-overlay-9b2489226d9368031a7b3bda647aa318b561217e216d66893e4c29221678893d-merged.mount - /var/lib/containers/storage/overlay/9b2489226d9368031a7b3bda647aa318b561217e216d66893e4c29221678893d/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 07:13:08 UTC; 1h 4min ago
      Until: Sat 2025-11-29 07:13:08 UTC; 1h 4min ago
      Where: /var/lib/containers/storage/overlay/9b2489226d9368031a7b3bda647aa318b561217e216d66893e4c29221678893d/merged
       What: overlay

● var-lib-containers-storage-overlay-9c0e7bf78f942e6aa3195dd20f290c9411868ac62665f0d752dda2d23b56c9a6-merged.mount - /var/lib/containers/storage/overlay/9c0e7bf78f942e6aa3195dd20f290c9411868ac62665f0d752dda2d23b56c9a6/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 07:11:19 UTC; 1h 5min ago
      Until: Sat 2025-11-29 07:11:19 UTC; 1h 5min ago
      Where: /var/lib/containers/storage/overlay/9c0e7bf78f942e6aa3195dd20f290c9411868ac62665f0d752dda2d23b56c9a6/merged
       What: overlay

● var-lib-containers-storage-overlay-a686a88efca72eda8bc510d146a9fe8f82f95e9053f31a9a11942b114c9bc8a4-merged.mount - /var/lib/containers/storage/overlay/a686a88efca72eda8bc510d146a9fe8f82f95e9053f31a9a11942b114c9bc8a4/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 07:12:33 UTC; 1h 4min ago
      Until: Sat 2025-11-29 07:12:33 UTC; 1h 4min ago
      Where: /var/lib/containers/storage/overlay/a686a88efca72eda8bc510d146a9fe8f82f95e9053f31a9a11942b114c9bc8a4/merged
       What: overlay

● var-lib-containers-storage-overlay-c9054f3889d4073dbcb6c414e87b62cec47b95f3802627f1139fd900a8039e80-merged.mount - /var/lib/containers/storage/overlay/c9054f3889d4073dbcb6c414e87b62cec47b95f3802627f1139fd900a8039e80/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 07:13:03 UTC; 1h 4min ago
      Until: Sat 2025-11-29 07:13:03 UTC; 1h 4min ago
      Where: /var/lib/containers/storage/overlay/c9054f3889d4073dbcb6c414e87b62cec47b95f3802627f1139fd900a8039e80/merged
       What: overlay

● var-lib-containers-storage-overlay-e5e70d3ce46c267809e34f298c924f2f955d3549130488a083f5b7f9f5ca336a-merged.mount - /var/lib/containers/storage/overlay/e5e70d3ce46c267809e34f298c924f2f955d3549130488a083f5b7f9f5ca336a/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 07:24:13 UTC; 53min ago
      Until: Sat 2025-11-29 07:24:13 UTC; 53min ago
      Where: /var/lib/containers/storage/overlay/e5e70d3ce46c267809e34f298c924f2f955d3549130488a083f5b7f9f5ca336a/merged
       What: overlay

● var-lib-containers-storage-overlay.mount - /var/lib/containers/storage/overlay
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 07:11:17 UTC; 1h 5min ago
      Until: Sat 2025-11-29 07:11:17 UTC; 1h 5min ago
      Where: /var/lib/containers/storage/overlay
       What: /dev/vda1

● var-lib-containers-storage-overlay\x2dcontainers-23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 07:24:13 UTC; 53min ago
      Until: Sat 2025-11-29 07:24:13 UTC; 53min ago
      Where: /var/lib/containers/storage/overlay-containers/23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 07:35:57 UTC; 41min ago
      Until: Sat 2025-11-29 07:35:57 UTC; 41min ago
      Where: /var/lib/containers/storage/overlay-containers/53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 07:25:57 UTC; 51min ago
      Until: Sat 2025-11-29 07:25:57 UTC; 51min ago
      Where: /var/lib/containers/storage/overlay-containers/8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-11-29 07:38:36 UTC; 38min ago
      Until: Sat 2025-11-29 07:38:36 UTC; 38min ago
      Where: /var/lib/containers/storage/overlay-containers/c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809/userdata/shm
       What: shm

○ var-lib-machines.mount - Virtual Machine and Container Storage (Compatibility)
     Loaded: loaded (/usr/lib/systemd/system/var-lib-machines.mount; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-11-29 07:32:01 UTC; 45min ago
      Where: /var/lib/machines
       What: /var/lib/machines.raw

Nov 29 07:32:01 compute-0 systemd[1]: Virtual Machine and Container Storage (Compatibility) was skipped because of an unmet condition check (ConditionPathExists=/var/lib/machines.raw).

● var-lib-nfs-rpc_pipefs.mount - RPC Pipe File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Where: /var/lib/nfs/rpc_pipefs
       What: rpc_pipefs

● systemd-ask-password-console.path - Dispatch Password Requests to Console Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.path; static)
     Active: active (waiting) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
   Triggers: ● systemd-ask-password-console.service
       Docs: man:systemd-ask-password-console.path(8)

● systemd-ask-password-wall.path - Forward Password Requests to Wall Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.path; static)
     Active: active (waiting) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
   Triggers: ● systemd-ask-password-wall.service
       Docs: man:systemd-ask-password-wall.path(8)

● init.scope - System and Service Manager
     Loaded: loaded
  Transient: yes
     Active: active (running) since Sat 2025-11-29 06:28:07 UTC; 1h 49min ago
       Docs: man:systemd(1)
         IO: 932.0K read, 0B written
      Tasks: 1 (limit: 48573)
     Memory: 48.2M (peak: 65.6M)
        CPU: 1min 20.565s
     CGroup: /init.scope
             └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31

Nov 29 08:17:03 compute-0 systemd[1]: Started libcrun container.
Nov 29 08:17:03 compute-0 systemd[1]: libpod-461e59920c5310cbd4998dedb59ec0837a1a689a1fea0d784d33ff5a9f7b8ecf.scope: Deactivated successfully.
Nov 29 08:17:03 compute-0 systemd[1]: var-lib-containers-storage-overlay-a6117d844ad7ad7a2fc33616a7a58ef68dd010ec5e13672e6ef63b2ba4ed2080-merged.mount: Deactivated successfully.
Nov 29 08:17:03 compute-0 systemd[1]: libpod-conmon-461e59920c5310cbd4998dedb59ec0837a1a689a1fea0d784d33ff5a9f7b8ecf.scope: Deactivated successfully.
Nov 29 08:17:03 compute-0 systemd[1]: Started libpod-conmon-d4a9be8cb43979961b1e87e7f3e0d6905eb8c9a0bb29d35e2dfe03c2ac9375f6.scope.
Nov 29 08:17:03 compute-0 systemd[1]: Started libcrun container.
Nov 29 08:17:04 compute-0 systemd[1]: libpod-d4a9be8cb43979961b1e87e7f3e0d6905eb8c9a0bb29d35e2dfe03c2ac9375f6.scope: Deactivated successfully.
Nov 29 08:17:04 compute-0 systemd[1]: libpod-d4a9be8cb43979961b1e87e7f3e0d6905eb8c9a0bb29d35e2dfe03c2ac9375f6.scope: Consumed 1.005s CPU time.
Nov 29 08:17:04 compute-0 systemd[1]: var-lib-containers-storage-overlay-bc80bc3e197dad729721c53f1b41d1a7516a3adf67ba9cdfa41e0969fe84d28c-merged.mount: Deactivated successfully.
Nov 29 08:17:04 compute-0 systemd[1]: libpod-conmon-d4a9be8cb43979961b1e87e7f3e0d6905eb8c9a0bb29d35e2dfe03c2ac9375f6.scope: Deactivated successfully.

● libpod-23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8.scope.d
             └─dep.conf
     Active: active (running) since Sat 2025-11-29 07:24:13 UTC; 53min ago
         IO: 7.2M read, 4.0K written
      Tasks: 6 (limit: 4096)
     Memory: 20.8M (peak: 26.3M)
        CPU: 9.316s
     CGroup: /machine.slice/libpod-23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8.scope
             └─container
               ├─153385 dumb-init --single-child -- kolla_start
               └─153388 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt

Nov 29 07:24:13 compute-0 systemd[1]: Started libcrun container.

● libpod-53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-11-29 07:35:57 UTC; 41min ago
         IO: 92.0K read, 4.0K written
      Tasks: 8 (limit: 4096)
     Memory: 19.1M (peak: 20.9M)
        CPU: 1.615s
     CGroup: /machine.slice/libpod-53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f.scope
             └─container
               ├─238043 dumb-init --single-child -- kolla_start
               └─238046 /usr/sbin/multipathd -d

Nov 29 07:35:57 compute-0 systemd[1]: Started libcrun container.
Nov 29 07:35:57 compute-0 sudo[238047]:     root : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_set_configs
Nov 29 07:35:57 compute-0 sudo[238047]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Nov 29 07:35:57 compute-0 sudo[238047]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=0)
Nov 29 07:35:57 compute-0 sudo[238047]: pam_unix(sudo:session): session closed for user root
Nov 29 07:35:57 compute-0 sudo[238071]:     root : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_copy_cacerts
Nov 29 07:35:57 compute-0 sudo[238071]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Nov 29 07:35:57 compute-0 sudo[238071]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=0)
Nov 29 07:35:57 compute-0 sudo[238071]: pam_unix(sudo:session): session closed for user root

● libpod-8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e.scope.d
             └─dep.conf
     Active: active (running) since Sat 2025-11-29 07:25:57 UTC; 51min ago
         IO: 5.7M read, 13.1M written
      Tasks: 11 (limit: 4096)
     Memory: 426.1M (peak: 472.6M)
        CPU: 49.702s
     CGroup: /machine.slice/libpod-8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e.scope
             └─container
               ├─163635 dumb-init --single-child -- kolla_start
               ├─163655 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─164115 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─164178 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpthalsdm8/privsep.sock
               ├─266092 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpv5pj6f28/privsep.sock
               └─266358 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmp_ylguf3k/privsep.sock

Nov 29 08:10:52 compute-0 podman[303249]: 2025-11-29 08:10:52.740499231 +0000 UTC m=+0.047692317 container died 26b18a15403de53c2995223a8395c3ab3b6e63ac37a575945def568496e10817 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-dda88d46-9162-4e7c-bb47-793ac4133966, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=1f5c0439f2433cb462b222a5bb23e629, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251125, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2)
Nov 29 08:10:52 compute-0 podman[303249]: 2025-11-29 08:10:52.783107907 +0000 UTC m=+0.090300983 container cleanup 26b18a15403de53c2995223a8395c3ab3b6e63ac37a575945def568496e10817 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-dda88d46-9162-4e7c-bb47-793ac4133966, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, tcib_build_tag=1f5c0439f2433cb462b222a5bb23e629, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251125, org.label-schema.vendor=CentOS)
Nov 29 08:10:52 compute-0 podman[303306]: 2025-11-29 08:10:52.861083779 +0000 UTC m=+0.050660679 container remove 26b18a15403de53c2995223a8395c3ab3b6e63ac37a575945def568496e10817 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-dda88d46-9162-4e7c-bb47-793ac4133966, tcib_managed=true, org.label-schema.build-date=20251125, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=1f5c0439f2433cb462b222a5bb23e629)
Nov 29 08:11:15 compute-0 podman[303984]: 2025-11-29 08:11:15.452400776 +0000 UTC m=+0.067172834 container create a39f5667c5d102a1fad0426d3bcdeed3217b33f886d8fcab884c1d181e53f8fc (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-dda88d46-9162-4e7c-bb47-793ac4133966, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=1f5c0439f2433cb462b222a5bb23e629, io.buildah.version=1.41.3, org.label-schema.build-date=20251125, tcib_managed=true)
Nov 29 08:11:15 compute-0 podman[303984]: 2025-11-29 08:11:15.417153164 +0000 UTC m=+0.031925252 image pull c64a92d8e8fa4f5fb5baf11a4a693a964be3868fb7e72462c6e612c604f8d071 quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified
Nov 29 08:11:15 compute-0 podman[303984]: 2025-11-29 08:11:15.573746646 +0000 UTC m=+0.188518694 container init a39f5667c5d102a1fad0426d3bcdeed3217b33f886d8fcab884c1d181e53f8fc (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-dda88d46-9162-4e7c-bb47-793ac4133966, org.label-schema.build-date=20251125, org.label-schema.license=GPLv2, tcib_build_tag=1f5c0439f2433cb462b222a5bb23e629, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Nov 29 08:11:15 compute-0 podman[303984]: 2025-11-29 08:11:15.579446863 +0000 UTC m=+0.194218921 container start a39f5667c5d102a1fad0426d3bcdeed3217b33f886d8fcab884c1d181e53f8fc (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-dda88d46-9162-4e7c-bb47-793ac4133966, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251125, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, org.label-schema.schema-version=1.0, tcib_build_tag=1f5c0439f2433cb462b222a5bb23e629, io.buildah.version=1.41.3)
Nov 29 08:11:54 compute-0 podman[304712]: 2025-11-29 08:11:54.47246381 +0000 UTC m=+0.078941699 container died a39f5667c5d102a1fad0426d3bcdeed3217b33f886d8fcab884c1d181e53f8fc (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-dda88d46-9162-4e7c-bb47-793ac4133966, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251125, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=1f5c0439f2433cb462b222a5bb23e629)
Nov 29 08:11:54 compute-0 podman[304712]: 2025-11-29 08:11:54.523305243 +0000 UTC m=+0.129783142 container cleanup a39f5667c5d102a1fad0426d3bcdeed3217b33f886d8fcab884c1d181e53f8fc (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-dda88d46-9162-4e7c-bb47-793ac4133966, org.label-schema.build-date=20251125, org.label-schema.schema-version=1.0, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=1f5c0439f2433cb462b222a5bb23e629, org.label-schema.vendor=CentOS)
Nov 29 08:11:54 compute-0 podman[304761]: 2025-11-29 08:11:54.609175673 +0000 UTC m=+0.050085273 container remove a39f5667c5d102a1fad0426d3bcdeed3217b33f886d8fcab884c1d181e53f8fc (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-dda88d46-9162-4e7c-bb47-793ac4133966, org.label-schema.build-date=20251125, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_build_tag=1f5c0439f2433cb462b222a5bb23e629)

● libpod-c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-11-29 07:38:36 UTC; 38min ago
         IO: 36.6M read, 42.6M written
      Tasks: 29 (limit: 4096)
     Memory: 479.5M (peak: 566.0M)
        CPU: 2min 38.599s
     CGroup: /machine.slice/libpod-c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809.scope
             └─container
               ├─256731 dumb-init --single-child -- kolla_start
               ├─256736 /usr/bin/python3 /usr/bin/nova-compute
               ├─265746 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmp4yqe7f_k/privsep.sock
               ├─266745 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmp2b17b584/privsep.sock
               └─266829 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmparx_pv8r/privsep.sock

Nov 29 07:38:36 compute-0 systemd[1]: Started libcrun container.
Nov 29 07:47:54 compute-0 systemd-coredump[266851]: Process 266831 (qemu-img) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 266841:
                                                    #0  0x00007f8ef59a703c __pthread_kill_implementation (libc.so.6 + 0x8d03c)
                                                    #1  0x00007f8ef5959b86 raise (libc.so.6 + 0x3fb86)
                                                    #2  0x00007f8ef5943873 abort (libc.so.6 + 0x29873)
                                                    #3  0x00005648e4ddd5df ___interceptor_pthread_create (qemu-img + 0x4f5df)
                                                    #4  0x00007f8ef2b7dff4 _ZN6Thread10try_createEm (libceph-common.so.2 + 0x258ff4)
                                                    #5  0x00007f8ef2b806ae _ZN6Thread6createEPKcm (libceph-common.so.2 + 0x25b6ae)
                                                    #6  0x00007f8ef3a8726b _ZNSt8_Rb_treeISt4pairINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESt10type_indexES0_IKS8_N4ceph12immobile_anyILm576EEEESt10_Select1stISD_ENSA_6common11CephContext19associated_objs_cmpESaISD_EE22_M_emplace_hint_uniqueIJRKSt21piecewise_construct_tSt5tupleIJRSt17basic_string_viewIcS4_ERS7_EESP_IJRKSt15in_place_type_tIN6librbd21TaskFinisherSingletonEERPSH_EEEEESt17_Rb_tree_iteratorISD_ESt23_Rb_tree_const_iteratorISD_EDpOT_.constprop.0 (librbd.so.1 + 0x51126b)
                                                    #7  0x00007f8ef36b47a6 _ZN6librbd8ImageCtx4initEv (librbd.so.1 + 0x13e7a6)
                                                    #8  0x00007f8ef378e2d3 _ZN6librbd5image11OpenRequestINS_8ImageCtxEE12send_refreshEv (librbd.so.1 + 0x2182d3)
                                                    #9  0x00007f8ef378ef46 _ZN6librbd5image11OpenRequestINS_8ImageCtxEE23handle_v2_get_data_poolEPi (librbd.so.1 + 0x218f46)
                                                    #10 0x00007f8ef378f2a7 _ZN6librbd4util6detail20rados_state_callbackINS_5image11OpenRequestINS_8ImageCtxEEEXadL_ZNS6_23handle_v2_get_data_poolEPiEELb1EEEvPvS8_ (librbd.so.1 + 0x2192a7)
                                                    #11 0x00007f8ef348d0ac _ZN5boost4asio6detail18completion_handlerINS1_7binder0IN8librados14CB_AioCompleteEEENS0_10io_context19basic_executor_typeISaIvELm0EEEE11do_completeEPvPNS1_19scheduler_operationERKNS_6system10error_codeEm (librados.so.2 + 0xad0ac)
                                                    #12 0x00007f8ef348c585 _ZN5boost4asio6detail14strand_service11do_completeEPvPNS1_19scheduler_operationERKNS_6system10error_codeEm (librados.so.2 + 0xac585)
                                                    #13 0x00007f8ef3507498 _ZN5boost4asio6detail9scheduler3runERNS_6system10error_codeE.constprop.0.isra.0 (librados.so.2 + 0x127498)
                                                    #14 0x00007f8ef34a64e4 _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJZ17make_named_threadIZN4ceph5async15io_context_pool5startEsEUlvE_JEES_St17basic_string_viewIcSt11char_traitsIcEEOT_DpOT0_EUlSD_SG_E_S7_EEEEE6_M_runEv (librados.so.2 + 0xc64e4)
                                                    #15 0x00007f8ef2214ae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #16 0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #17 0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266833:
                                                    #0  0x00007f8ef59a238a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f8ef59a48e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f8ef220e6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f8ef2d900a2 _ZN4ceph7logging3Log5entryEv (libceph-common.so.2 + 0x46b0a2)
                                                    #4  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #5  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266831:
                                                    #0  0x00007f8ef59a238a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f8ef59a48e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f8ef220e6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f8ef36bbeb3 _ZN6librbd10ImageStateINS_8ImageCtxEE4openEm (librbd.so.1 + 0x145eb3)
                                                    #4  0x00007f8ef368bfcb rbd_open (librbd.so.1 + 0x115fcb)
                                                    #5  0x00007f8ef3c3689d qemu_rbd_open (block-rbd.so + 0x489d)
                                                    #6  0x00005648e4dee25c bdrv_open_driver.llvm.1535778247189356743 (qemu-img + 0x6025c)
                                                    #7  0x00005648e4df34b7 bdrv_open_inherit.llvm.1535778247189356743 (qemu-img + 0x654b7)
                                                    #8  0x00005648e4e00de1 bdrv_open_child_bs.llvm.1535778247189356743 (qemu-img + 0x72de1)
                                                    #9  0x00005648e4df2c36 bdrv_open_inherit.llvm.1535778247189356743 (qemu-img + 0x64c36)
                                                    #10 0x00005648e4e224b3 blk_new_open (qemu-img + 0x944b3)
                                                    #11 0x00005648e4ee2516 img_open_file (qemu-img + 0x154516)
                                                    #12 0x00005648e4ee20c0 img_open (qemu-img + 0x1540c0)
                                                    #13 0x00005648e4ede03b img_info (qemu-img + 0x15003b)
                                                    #14 0x00005648e4ed76ca main (qemu-img + 0x1496ca)
                                                    #15 0x00007f8ef5944610 __libc_start_call_main (libc.so.6 + 0x2a610)
                                                    #16 0x00007f8ef59446c0 __libc_start_main@@GLIBC_2.34 (libc.so.6 + 0x2a6c0)
                                                    #17 0x00005648e4ddd285 _start (qemu-img + 0x4f285)
                                                    
                                                    Stack trace of thread 266832:
                                                    #0  0x00007f8ef5a2282d syscall (libc.so.6 + 0x10882d)
                                                    #1  0x00005648e4f68193 qemu_event_wait (qemu-img + 0x1da193)
                                                    #2  0x00005648e4f732e7 call_rcu_thread (qemu-img + 0x1e52e7)
                                                    #3  0x00005648e4f662aa qemu_thread_start.llvm.12875871551448449403 (qemu-img + 0x1d82aa)
                                                    #4  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #5  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266840:
                                                    #0  0x00007f8ef59a238a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f8ef59a4cc0 pthread_cond_clockwait@GLIBC_2.30 (libc.so.6 + 0x8acc0)
                                                    #2  0x00007f8ef2b9e150 _ZN4ceph6common24CephContextServiceThread5entryEv (libceph-common.so.2 + 0x279150)
                                                    #3  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #4  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266835:
                                                    #0  0x00007f8ef5a29a3e epoll_wait (libc.so.6 + 0x10fa3e)
                                                    #1  0x00007f8ef2d65618 _ZN11EpollDriver10event_waitERSt6vectorI14FiredFileEventSaIS1_EEP7timeval (libceph-common.so.2 + 0x440618)
                                                    #2  0x00007f8ef2d63702 _ZN11EventCenter14process_eventsEjPNSt6chrono8durationImSt5ratioILl1ELl1000000000EEEE (libceph-common.so.2 + 0x43e702)
                                                    #3  0x00007f8ef2d642c6 _ZNSt17_Function_handlerIFvvEZN12NetworkStack10add_threadEP6WorkerEUlvE_E9_M_invokeERKSt9_Any_data (libceph-common.so.2 + 0x43f2c6)
                                                    #4  0x00007f8ef2214ae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #5  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266847:
                                                    #0  0x00007f8ef59a238a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f8ef59a48e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f8ef220e6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f8ef2b837f8 _ZN15CommonSafeTimerISt5mutexE12timer_threadEv (libceph-common.so.2 + 0x25e7f8)
                                                    #4  0x00007f8ef2b83f81 _ZN21CommonSafeTimerThreadISt5mutexE5entryEv (libceph-common.so.2 + 0x25ef81)
                                                    #5  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266848:
                                                    #0  0x00007f8ef59a238a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f8ef59a48e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f8ef220e6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f8ef2b837f8 _ZN15CommonSafeTimerISt5mutexE12timer_threadEv (libceph-common.so.2 + 0x25e7f8)
                                                    #4  0x00007f8ef2b83f81 _ZN21CommonSafeTimerThreadISt5mutexE5entryEv (libceph-common.so.2 + 0x25ef81)
                                                    #5  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266836:
                                                    #0  0x00007f8ef5a29a3e epoll_wait (libc.so.6 + 0x10fa3e)
                                                    #1  0x00007f8ef2d65618 _ZN11EpollDriver10event_waitERSt6vectorI14FiredFileEventSaIS1_EEP7timeval (libceph-common.so.2 + 0x440618)
                                                    #2  0x00007f8ef2d63702 _ZN11EventCenter14process_eventsEjPNSt6chrono8durationImSt5ratioILl1ELl1000000000EEEE (libceph-common.so.2 + 0x43e702)
                                                    #3  0x00007f8ef2d642c6 _ZNSt17_Function_handlerIFvvEZN12NetworkStack10add_threadEP6WorkerEUlvE_E9_M_invokeERKSt9_Any_data (libceph-common.so.2 + 0x43f2c6)
                                                    #4  0x00007f8ef2214ae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #5  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266845:
                                                    #0  0x00007f8ef59a238a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f8ef59a48e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f8ef220e6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f8ef2c8c0b9 _ZN13DispatchQueue18run_local_deliveryEv (libceph-common.so.2 + 0x3670b9)
                                                    #4  0x00007f8ef2d1d431 _ZN13DispatchQueue19LocalDeliveryThread5entryEv (libceph-common.so.2 + 0x3f8431)
                                                    #5  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266842:
                                                    #0  0x00007f8ef59a238a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f8ef59a48e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f8ef3507266 _ZN5boost4asio6detail9scheduler3runERNS_6system10error_codeE.constprop.0.isra.0 (librados.so.2 + 0x127266)
                                                    #3  0x00007f8ef34a64e4 _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJZ17make_named_threadIZN4ceph5async15io_context_pool5startEsEUlvE_JEES_St17basic_string_viewIcSt11char_traitsIcEEOT_DpOT0_EUlSD_SG_E_S7_EEEEE6_M_runEv (librados.so.2 + 0xc64e4)
                                                    #4  0x00007f8ef2214ae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #5  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266844:
                                                    #0  0x00007f8ef59a238a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f8ef59a48e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f8ef220e6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f8ef2c8c49f _ZN13DispatchQueue5entryEv (libceph-common.so.2 + 0x36749f)
                                                    #4  0x00007f8ef2d1d411 _ZN13DispatchQueue14DispatchThread5entryEv (libceph-common.so.2 + 0x3f8411)
                                                    #5  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266846:
                                                    #0  0x00007f8ef59a238a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f8ef59a4cc0 pthread_cond_clockwait@GLIBC_2.30 (libc.so.6 + 0x8acc0)
                                                    #2  0x00007f8ef2b83b23 _ZN15CommonSafeTimerISt5mutexE12timer_threadEv (libceph-common.so.2 + 0x25eb23)
                                                    #3  0x00007f8ef2b83f81 _ZN21CommonSafeTimerThreadISt5mutexE5entryEv (libceph-common.so.2 + 0x25ef81)
                                                    #4  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #5  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266849:
                                                    #0  0x00007f8ef59a238a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f8ef59a48e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f8ef220e6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f8ef2b837f8 _ZN15CommonSafeTimerISt5mutexE12timer_threadEv (libceph-common.so.2 + 0x25e7f8)
                                                    #4  0x00007f8ef2b83f81 _ZN21CommonSafeTimerThreadISt5mutexE5entryEv (libceph-common.so.2 + 0x25ef81)
                                                    #5  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266843:
                                                    #0  0x00007f8ef59a238a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f8ef59a4cc0 pthread_cond_clockwait@GLIBC_2.30 (libc.so.6 + 0x8acc0)
                                                    #2  0x00007f8ef34df364 _ZN4ceph5timerINS_17coarse_mono_clockEE12timer_threadEv (librados.so.2 + 0xff364)
                                                    #3  0x00007f8ef2214ae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #4  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #5  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266834:
                                                    #0  0x00007f8ef5a29a3e epoll_wait (libc.so.6 + 0x10fa3e)
                                                    #1  0x00007f8ef2d65618 _ZN11EpollDriver10event_waitERSt6vectorI14FiredFileEventSaIS1_EEP7timeval (libceph-common.so.2 + 0x440618)
                                                    #2  0x00007f8ef2d63702 _ZN11EventCenter14process_eventsEjPNSt6chrono8durationImSt5ratioILl1ELl1000000000EEEE (libceph-common.so.2 + 0x43e702)
                                                    #3  0x00007f8ef2d642c6 _ZNSt17_Function_handlerIFvvEZN12NetworkStack10add_threadEP6WorkerEUlvE_E9_M_invokeERKSt9_Any_data (libceph-common.so.2 + 0x43f2c6)
                                                    #4  0x00007f8ef2214ae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #5  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    ELF object binary architecture: AMD x86-64

● session-1.scope - Session 1 of User zuul
     Loaded: loaded (/run/systemd/transient/session-1.scope; transient)
  Transient: yes
     Active: active (abandoned) since Sat 2025-11-29 06:30:09 UTC; 1h 47min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 16.6M (peak: 37.7M)
        CPU: 1min 23.127s
     CGroup: /user.slice/user-1000.slice/session-1.scope
             └─4513 /usr/bin/python3

Nov 29 06:33:07 np0005539576.novalocal sudo[7368]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Nov 29 06:33:07 np0005539576.novalocal python3[7370]: ansible-ansible.legacy.stat Invoked with path=/etc/ci/env/networking-info.yml follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Nov 29 06:33:07 np0005539576.novalocal sudo[7368]: pam_unix(sudo:session): session closed for user root
Nov 29 06:33:08 np0005539576.novalocal sudo[7441]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-gkbanwuozosqfaciaaujwflxjkifmlad ; OS_CLOUD=vexxhost /usr/bin/python3'
Nov 29 06:33:08 np0005539576.novalocal sudo[7441]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Nov 29 06:33:08 np0005539576.novalocal python3[7443]: ansible-ansible.legacy.copy Invoked with dest=/etc/ci/env/networking-info.yml owner=root group=root mode=0644 src=/home/zuul/.ansible/tmp/ansible-tmp-1764397987.6702394-267-81775925305784/source _original_basename=tmp4qyint8q follow=False checksum=32e66a8416a2fa12c80c0fe5eedab5d7b78f9aac backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Nov 29 06:33:08 np0005539576.novalocal sudo[7441]: pam_unix(sudo:session): session closed for user root
Nov 29 06:34:08 np0005539576.novalocal sshd-session[4310]: Received disconnect from 38.102.83.114 port 36446:11: disconnected by user
Nov 29 06:34:08 np0005539576.novalocal sshd-session[4310]: Disconnected from user zuul 38.102.83.114 port 36446
Nov 29 06:34:08 np0005539576.novalocal sshd-session[4297]: pam_unix(sshd:session): session closed for user zuul

● session-20.scope - Session 20 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-20.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-11-29 07:12:12 UTC; 1h 5min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 1.6M)
        CPU: 181ms
     CGroup: /user.slice/user-42477.slice/session-20.scope
             ├─76613 "sshd-session: ceph-admin [priv]"
             └─76635 "sshd-session: ceph-admin"

Nov 29 07:12:12 compute-0 systemd[1]: Started Session 20 of User ceph-admin.

● session-22.scope - Session 22 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-22.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-11-29 07:12:12 UTC; 1h 5min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.4M)
        CPU: 270ms
     CGroup: /user.slice/user-42477.slice/session-22.scope
             ├─76620 "sshd-session: ceph-admin [priv]"
             └─76636 "sshd-session: ceph-admin@notty"

Nov 29 07:12:12 compute-0 systemd[1]: Started Session 22 of User ceph-admin.
Nov 29 07:12:12 compute-0 sudo[76637]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Nov 29 07:12:12 compute-0 sudo[76637]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 29 07:12:12 compute-0 sudo[76637]: pam_unix(sudo:session): session closed for user root
Nov 29 07:12:12 compute-0 sudo[76662]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Nov 29 07:12:12 compute-0 sudo[76662]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 29 07:12:12 compute-0 sudo[76662]: pam_unix(sudo:session): session closed for user root

● session-23.scope - Session 23 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-23.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-11-29 07:12:12 UTC; 1h 5min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 5.9M)
        CPU: 347ms
     CGroup: /user.slice/user-42477.slice/session-23.scope
             ├─76687 "sshd-session: ceph-admin [priv]"
             └─76690 "sshd-session: ceph-admin@notty"

Nov 29 07:12:12 compute-0 systemd[1]: Started Session 23 of User ceph-admin.
Nov 29 07:12:12 compute-0 sudo[76691]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Nov 29 07:12:12 compute-0 sudo[76691]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 29 07:12:12 compute-0 sudo[76691]: pam_unix(sudo:session): session closed for user root
Nov 29 07:12:12 compute-0 sudo[76716]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/14ff1f30-5059-58f1-9a23-69871bb275a1/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 check-host --expect-hostname compute-0
Nov 29 07:12:12 compute-0 sudo[76716]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 29 07:12:12 compute-0 sudo[76716]: pam_unix(sudo:session): session closed for user root

● session-24.scope - Session 24 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-24.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-11-29 07:12:13 UTC; 1h 5min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.3M)
        CPU: 374ms
     CGroup: /user.slice/user-42477.slice/session-24.scope
             ├─76741 "sshd-session: ceph-admin [priv]"
             └─76744 "sshd-session: ceph-admin@notty"

Nov 29 07:12:13 compute-0 systemd[1]: Started Session 24 of User ceph-admin.
Nov 29 07:12:13 compute-0 sudo[76745]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Nov 29 07:12:13 compute-0 sudo[76745]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 29 07:12:13 compute-0 sudo[76745]: pam_unix(sudo:session): session closed for user root
Nov 29 07:12:13 compute-0 sudo[76770]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /var/lib/ceph/14ff1f30-5059-58f1-9a23-69871bb275a1/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d
Nov 29 07:12:13 compute-0 sudo[76770]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 29 07:12:13 compute-0 sudo[76770]: pam_unix(sudo:session): session closed for user root

● session-25.scope - Session 25 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-25.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-11-29 07:12:13 UTC; 1h 5min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.2M)
        CPU: 290ms
     CGroup: /user.slice/user-42477.slice/session-25.scope
             ├─76795 "sshd-session: ceph-admin [priv]"
             └─76798 "sshd-session: ceph-admin@notty"

Nov 29 07:12:13 compute-0 systemd[1]: Started Session 25 of User ceph-admin.
Nov 29 07:12:13 compute-0 sudo[76799]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Nov 29 07:12:13 compute-0 sudo[76799]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 29 07:12:13 compute-0 sudo[76799]: pam_unix(sudo:session): session closed for user root
Nov 29 07:12:13 compute-0 sudo[76824]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /var/lib/ceph/14ff1f30-5059-58f1-9a23-69871bb275a1
Nov 29 07:12:13 compute-0 sudo[76824]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 29 07:12:13 compute-0 sudo[76824]: pam_unix(sudo:session): session closed for user root

● session-26.scope - Session 26 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-26.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-11-29 07:12:14 UTC; 1h 5min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.3M)
        CPU: 305ms
     CGroup: /user.slice/user-42477.slice/session-26.scope
             ├─76849 "sshd-session: ceph-admin [priv]"
             └─76852 "sshd-session: ceph-admin@notty"

Nov 29 07:12:14 compute-0 systemd[1]: Started Session 26 of User ceph-admin.
Nov 29 07:12:14 compute-0 sudo[76853]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Nov 29 07:12:14 compute-0 sudo[76853]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 29 07:12:14 compute-0 sudo[76853]: pam_unix(sudo:session): session closed for user root
Nov 29 07:12:14 compute-0 sudo[76878]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /tmp/cephadm-14ff1f30-5059-58f1-9a23-69871bb275a1/var/lib/ceph/14ff1f30-5059-58f1-9a23-69871bb275a1
Nov 29 07:12:14 compute-0 sudo[76878]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 29 07:12:14 compute-0 sudo[76878]: pam_unix(sudo:session): session closed for user root

● session-27.scope - Session 27 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-27.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-11-29 07:12:14 UTC; 1h 5min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.1M)
        CPU: 330ms
     CGroup: /user.slice/user-42477.slice/session-27.scope
             ├─76903 "sshd-session: ceph-admin [priv]"
             └─76906 "sshd-session: ceph-admin@notty"

Nov 29 07:12:14 compute-0 systemd[1]: Started Session 27 of User ceph-admin.
Nov 29 07:12:14 compute-0 sudo[76907]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Nov 29 07:12:14 compute-0 sudo[76907]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 29 07:12:14 compute-0 sudo[76907]: pam_unix(sudo:session): session closed for user root
Nov 29 07:12:14 compute-0 sudo[76932]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/touch /tmp/cephadm-14ff1f30-5059-58f1-9a23-69871bb275a1/var/lib/ceph/14ff1f30-5059-58f1-9a23-69871bb275a1/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new
Nov 29 07:12:14 compute-0 sudo[76932]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 29 07:12:14 compute-0 sudo[76932]: pam_unix(sudo:session): session closed for user root

● session-28.scope - Session 28 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-28.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-11-29 07:12:15 UTC; 1h 5min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.3M)
        CPU: 341ms
     CGroup: /user.slice/user-42477.slice/session-28.scope
             ├─76957 "sshd-session: ceph-admin [priv]"
             └─76960 "sshd-session: ceph-admin@notty"

Nov 29 07:12:15 compute-0 systemd[1]: Started Session 28 of User ceph-admin.
Nov 29 07:12:15 compute-0 sudo[76961]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Nov 29 07:12:15 compute-0 sudo[76961]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 29 07:12:15 compute-0 sudo[76961]: pam_unix(sudo:session): session closed for user root
Nov 29 07:12:15 compute-0 sudo[76986]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chown -R ceph-admin /tmp/cephadm-14ff1f30-5059-58f1-9a23-69871bb275a1
Nov 29 07:12:15 compute-0 sudo[76986]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 29 07:12:15 compute-0 sudo[76986]: pam_unix(sudo:session): session closed for user root

● session-29.scope - Session 29 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-29.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-11-29 07:12:15 UTC; 1h 5min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.3M)
        CPU: 293ms
     CGroup: /user.slice/user-42477.slice/session-29.scope
             ├─77011 "sshd-session: ceph-admin [priv]"
             └─77014 "sshd-session: ceph-admin@notty"

Nov 29 07:12:15 compute-0 systemd[1]: Started Session 29 of User ceph-admin.
Nov 29 07:12:15 compute-0 sudo[77015]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Nov 29 07:12:15 compute-0 sudo[77015]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 29 07:12:15 compute-0 sudo[77015]: pam_unix(sudo:session): session closed for user root
Nov 29 07:12:15 compute-0 sudo[77040]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chmod 644 /tmp/cephadm-14ff1f30-5059-58f1-9a23-69871bb275a1/var/lib/ceph/14ff1f30-5059-58f1-9a23-69871bb275a1/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new
Nov 29 07:12:15 compute-0 sudo[77040]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 29 07:12:15 compute-0 sudo[77040]: pam_unix(sudo:session): session closed for user root

● session-30.scope - Session 30 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-30.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-11-29 07:12:15 UTC; 1h 5min ago
         IO: 0B read, 216.0K written
      Tasks: 2
     Memory: 1.5M (peak: 3.4M)
        CPU: 179ms
     CGroup: /user.slice/user-42477.slice/session-30.scope
             ├─77065 "sshd-session: ceph-admin [priv]"
             └─77068 "sshd-session: ceph-admin@notty"

Nov 29 07:12:15 compute-0 systemd[1]: Started Session 30 of User ceph-admin.

● session-31.scope - Session 31 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-31.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-11-29 07:12:16 UTC; 1h 5min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.0M)
        CPU: 280ms
     CGroup: /user.slice/user-42477.slice/session-31.scope
             ├─77092 "sshd-session: ceph-admin [priv]"
             └─77095 "sshd-session: ceph-admin@notty"

Nov 29 07:12:16 compute-0 systemd[1]: Started Session 31 of User ceph-admin.
Nov 29 07:12:16 compute-0 sudo[77096]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Nov 29 07:12:16 compute-0 sudo[77096]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 29 07:12:16 compute-0 sudo[77096]: pam_unix(sudo:session): session closed for user root
Nov 29 07:12:16 compute-0 sudo[77121]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mv /tmp/cephadm-14ff1f30-5059-58f1-9a23-69871bb275a1/var/lib/ceph/14ff1f30-5059-58f1-9a23-69871bb275a1/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new /var/lib/ceph/14ff1f30-5059-58f1-9a23-69871bb275a1/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d
Nov 29 07:12:16 compute-0 sudo[77121]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 29 07:12:16 compute-0 sudo[77121]: pam_unix(sudo:session): session closed for user root

● session-32.scope - Session 32 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-32.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-11-29 07:12:16 UTC; 1h 4min ago
         IO: 9.1M read, 173.8M written
      Tasks: 2
     Memory: 13.8M (peak: 54.8M)
        CPU: 4min 52.899s
     CGroup: /user.slice/user-42477.slice/session-32.scope
             ├─77146 "sshd-session: ceph-admin [priv]"
             └─77149 "sshd-session: ceph-admin@notty"

Nov 29 08:17:03 compute-0 podman[316184]: 2025-11-29 08:17:03.77816041 +0000 UTC m=+0.144861892 container attach d4a9be8cb43979961b1e87e7f3e0d6905eb8c9a0bb29d35e2dfe03c2ac9375f6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_shockley, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, OSD_FLAVOR=default)
Nov 29 08:17:04 compute-0 podman[316184]: 2025-11-29 08:17:04.82344633 +0000 UTC m=+1.190147822 container died d4a9be8cb43979961b1e87e7f3e0d6905eb8c9a0bb29d35e2dfe03c2ac9375f6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_shockley, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image)
Nov 29 08:17:04 compute-0 podman[316184]: 2025-11-29 08:17:04.878195159 +0000 UTC m=+1.244896641 container remove d4a9be8cb43979961b1e87e7f3e0d6905eb8c9a0bb29d35e2dfe03c2ac9375f6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_shockley, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3)
Nov 29 08:17:04 compute-0 sudo[315985]: pam_unix(sudo:session): session closed for user root
Nov 29 08:17:04 compute-0 sudo[316565]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Nov 29 08:17:05 compute-0 sudo[316565]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 29 08:17:05 compute-0 sudo[316565]: pam_unix(sudo:session): session closed for user root
Nov 29 08:17:05 compute-0 sudo[316607]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Nov 29 08:17:05 compute-0 sudo[316607]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 29 08:17:05 compute-0 sudo[316607]: pam_unix(sudo:session): session closed for user root

● session-51.scope - Session 51 of User zuul
     Loaded: loaded (/run/systemd/transient/session-51.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-11-29 08:16:26 UTC; 50s ago
         IO: 449.4M read, 268.7M written
      Tasks: 14
     Memory: 911.8M (peak: 991.8M)
        CPU: 2min 13.841s
     CGroup: /user.slice/user-1000.slice/session-51.scope
             ├─311291 "sshd-session: zuul [priv]"
             ├─311294 "sshd-session: zuul@notty"
             ├─311295 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             ├─311319 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             ├─318244 timeout 15s turbostat --debug sleep 10
             ├─318920 timeout 300s systemctl status --all
             ├─318922 systemctl status --all
             ├─318956 timeout --foreground 300s virsh -r nodedev-dumpxml net_vlan21_c2_c6_49_db_df_6d
             └─318957 virsh -r nodedev-dumpxml net_vlan21_c2_c6_49_db_df_6d

Nov 29 08:16:26 compute-0 systemd[1]: Started Session 51 of User zuul.
Nov 29 08:16:26 compute-0 sudo[311295]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Nov 29 08:16:26 compute-0 sudo[311295]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Nov 29 08:16:36 compute-0 ovs-vsctl[311624]: ovs|00001|db_ctl_base|ERR|no key "dpdk-init" in Open_vSwitch record "." column other_config
Nov 29 08:17:07 compute-0 ovs-appctl[317380]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Nov 29 08:17:07 compute-0 ovs-appctl[317386]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

○ 23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8-719f575451f10305.service - /usr/bin/podman healthcheck run 23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8
     Loaded: loaded (/run/systemd/transient/23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8-719f575451f10305.service; transient)
  Transient: yes
     Active: inactive (dead) since Sat 2025-11-29 08:16:55 UTC; 21s ago
   Duration: 153ms
TriggeredBy: ● 23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8-719f575451f10305.timer
    Process: 314267 ExecStart=/usr/bin/podman healthcheck run 23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8 (code=exited, status=0/SUCCESS)
   Main PID: 314267 (code=exited, status=0/SUCCESS)
        CPU: 89ms

Nov 29 08:16:55 compute-0 podman[314267]: 2025-11-29 08:16:55.760057957 +0000 UTC m=+0.127090360 container health_status 23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=ovn_controller, container_name=ovn_controller, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_build_tag=1f5c0439f2433cb462b222a5bb23e629, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251125, tcib_managed=true)

○ 53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f-6af3e4fb7442b0af.service - /usr/bin/podman healthcheck run 53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f
     Loaded: loaded (/run/systemd/transient/53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f-6af3e4fb7442b0af.service; transient)
  Transient: yes
     Active: inactive (dead) since Sat 2025-11-29 08:16:55 UTC; 21s ago
   Duration: 126ms
TriggeredBy: ● 53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f-6af3e4fb7442b0af.timer
    Process: 314269 ExecStart=/usr/bin/podman healthcheck run 53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f (code=exited, status=0/SUCCESS)
   Main PID: 314269 (code=exited, status=0/SUCCESS)
        CPU: 71ms

Nov 29 08:16:55 compute-0 podman[314269]: 2025-11-29 08:16:55.741628055 +0000 UTC m=+0.105354898 container health_status 53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=1f5c0439f2433cb462b222a5bb23e629, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, container_name=multipathd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, config_id=multipathd, managed_by=edpm_ansible, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, io.buildah.version=1.41.3, org.label-schema.build-date=20251125, org.label-schema.schema-version=1.0)

○ 8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e-5e7e647fed5e19a2.service - /usr/bin/podman healthcheck run 8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e
     Loaded: loaded (/run/systemd/transient/8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e-5e7e647fed5e19a2.service; transient)
  Transient: yes
     Active: inactive (dead) since Sat 2025-11-29 08:16:55 UTC; 21s ago
   Duration: 133ms
TriggeredBy: ● 8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e-5e7e647fed5e19a2.timer
    Process: 314270 ExecStart=/usr/bin/podman healthcheck run 8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e (code=exited, status=0/SUCCESS)
   Main PID: 314270 (code=exited, status=0/SUCCESS)
        CPU: 70ms

Nov 29 08:16:55 compute-0 podman[314270]: 2025-11-29 08:16:55.754758732 +0000 UTC m=+0.114640650 container health_status 8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=1f5c0439f2433cb462b222a5bb23e629, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, org.label-schema.build-date=20251125, tcib_managed=true, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0)
Unit apparmor.service could not be found.
Unit apt-daily.service could not be found.
Unit auto-cpufreq.service could not be found.
Unit autofs.service could not be found.

● auditd.service - Security Auditing Service
     Loaded: loaded (/usr/lib/systemd/system/auditd.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
       Docs: man:auditd(8)
             https://github.com/linux-audit/audit-documentation
   Main PID: 704 (auditd)
         IO: 0B read, 30.8M written
      Tasks: 4 (limit: 48573)
     Memory: 18.8M (peak: 19.3M)
        CPU: 8.091s
     CGroup: /system.slice/auditd.service
             ├─704 /sbin/auditd
             └─706 /usr/sbin/sedispatch

Nov 29 06:29:43 localhost augenrules[724]: pid 704
Nov 29 06:29:43 localhost augenrules[724]: rate_limit 0
Nov 29 06:29:43 localhost augenrules[724]: backlog_limit 8192
Nov 29 06:29:43 localhost augenrules[724]: lost 0
Nov 29 06:29:43 localhost augenrules[724]: backlog 0
Nov 29 06:29:43 localhost augenrules[724]: backlog_wait_time 60000
Nov 29 06:29:43 localhost augenrules[724]: backlog_wait_time_actual 0
Nov 29 06:29:43 localhost systemd[1]: Started Security Auditing Service.
Nov 29 07:27:41 compute-0 auditd[704]: Audit daemon rotating log files
Nov 29 08:17:01 compute-0 auditd[704]: Audit daemon rotating log files

○ auth-rpcgss-module.service - Kernel Module supporting RPCSEC_GSS
     Loaded: loaded (/usr/lib/systemd/system/auth-rpcgss-module.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-11-29 06:29:42 UTC; 1h 47min ago

● ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@crash.compute-0.service - Ceph crash.compute-0 for 14ff1f30-5059-58f1-9a23-69871bb275a1
     Loaded: loaded (/etc/systemd/system/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-11-29 07:12:33 UTC; 1h 4min ago
   Main PID: 82991 (conmon)
         IO: 0B read, 1.1M written
      Tasks: 3 (limit: 48573)
     Memory: 12.1M (peak: 33.5M)
        CPU: 805ms
     CGroup: /system.slice/system-ceph\x2d14ff1f30\x2d5059\x2d58f1\x2d9a23\x2d69871bb275a1.slice/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@crash.compute-0.service
             ├─libpod-payload-7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230
             │ ├─82993 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ └─82995 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             └─runtime
               └─82991 /usr/bin/conmon --api-version 1 -c 7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230 -u 7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230/userdata -p /run/containers/storage/overlay-containers/7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230

Nov 29 07:12:33 compute-0 systemd[1]: Started Ceph crash.compute-0 for 14ff1f30-5059-58f1-9a23-69871bb275a1.
Nov 29 07:12:34 compute-0 ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-crash-compute-0[82991]: INFO:ceph-crash:pinging cluster to exercise our key
Nov 29 07:12:34 compute-0 ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-crash-compute-0[82991]: 2025-11-29T07:12:34.232+0000 7f87a1027640 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory
Nov 29 07:12:34 compute-0 ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-crash-compute-0[82991]: 2025-11-29T07:12:34.232+0000 7f87a1027640 -1 AuthRegistry(0x7f879c067440) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx
Nov 29 07:12:34 compute-0 ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-crash-compute-0[82991]: 2025-11-29T07:12:34.234+0000 7f87a1027640 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory
Nov 29 07:12:34 compute-0 ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-crash-compute-0[82991]: 2025-11-29T07:12:34.234+0000 7f87a1027640 -1 AuthRegistry(0x7f87a1026000) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx
Nov 29 07:12:34 compute-0 ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-crash-compute-0[82991]: 2025-11-29T07:12:34.234+0000 7f879ad76640 -1 monclient(hunting): handle_auth_bad_method server allowed_methods [2] but i only support [1]
Nov 29 07:12:34 compute-0 ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-crash-compute-0[82991]: 2025-11-29T07:12:34.235+0000 7f87a1027640 -1 monclient: authenticate NOTE: no keyring found; disabled cephx authentication
Nov 29 07:12:34 compute-0 ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-crash-compute-0[82991]: [errno 13] RADOS permission denied (error connecting to the cluster)
Nov 29 07:12:34 compute-0 ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-crash-compute-0[82991]: INFO:ceph-crash:monitoring path /var/lib/ceph/crash, delay 600s

● ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mds.cephfs.compute-0.bdhrqf.service - Ceph mds.cephfs.compute-0.bdhrqf for 14ff1f30-5059-58f1-9a23-69871bb275a1
     Loaded: loaded (/etc/systemd/system/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-11-29 07:14:30 UTC; 1h 2min ago
   Main PID: 102312 (conmon)
         IO: 0B read, 2.0M written
      Tasks: 28 (limit: 48573)
     Memory: 26.3M (peak: 27.2M)
        CPU: 7.653s
     CGroup: /system.slice/system-ceph\x2d14ff1f30\x2d5059\x2d58f1\x2d9a23\x2d69871bb275a1.slice/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mds.cephfs.compute-0.bdhrqf.service
             ├─libpod-payload-f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511
             │ ├─102314 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.bdhrqf -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─102316 /usr/bin/ceph-mds -n mds.cephfs.compute-0.bdhrqf -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─102312 /usr/bin/conmon --api-version 1 -c f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511 -u f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511/userdata -p /run/containers/storage/overlay-containers/f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-mds-cephfs-compute-0-bdhrqf --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mds.cephfs.compute-0.bdhrqf.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511

Nov 29 08:16:38 compute-0 ceph-mds[102316]: mds.cephfs.compute-0.bdhrqf asok_command: dump loads {prefix=dump loads} (starting...)
Nov 29 08:16:38 compute-0 ceph-mds[102316]: mds.cephfs.compute-0.bdhrqf asok_command: dump tree {prefix=dump tree,root=/} (starting...)
Nov 29 08:16:39 compute-0 ceph-mds[102316]: mds.cephfs.compute-0.bdhrqf asok_command: dump_blocked_ops {prefix=dump_blocked_ops} (starting...)
Nov 29 08:16:39 compute-0 ceph-mds[102316]: mds.cephfs.compute-0.bdhrqf asok_command: dump_historic_ops {prefix=dump_historic_ops} (starting...)
Nov 29 08:16:39 compute-0 ceph-mds[102316]: mds.cephfs.compute-0.bdhrqf asok_command: dump_historic_ops_by_duration {prefix=dump_historic_ops_by_duration} (starting...)
Nov 29 08:16:39 compute-0 ceph-mds[102316]: mds.cephfs.compute-0.bdhrqf asok_command: dump_ops_in_flight {prefix=dump_ops_in_flight} (starting...)
Nov 29 08:16:39 compute-0 ceph-mds[102316]: mds.cephfs.compute-0.bdhrqf asok_command: get subtrees {prefix=get subtrees} (starting...)
Nov 29 08:16:40 compute-0 ceph-mds[102316]: mds.cephfs.compute-0.bdhrqf asok_command: ops {prefix=ops} (starting...)
Nov 29 08:16:40 compute-0 ceph-mds[102316]: mds.cephfs.compute-0.bdhrqf asok_command: session ls {prefix=session ls} (starting...)
Nov 29 08:16:41 compute-0 ceph-mds[102316]: mds.cephfs.compute-0.bdhrqf asok_command: status {prefix=status} (starting...)

● ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mgr.compute-0.kzdpag.service - Ceph mgr.compute-0.kzdpag for 14ff1f30-5059-58f1-9a23-69871bb275a1
     Loaded: loaded (/etc/systemd/system/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-11-29 07:11:19 UTC; 1h 5min ago
   Main PID: 75341 (conmon)
         IO: 0B read, 3.5M written
      Tasks: 149 (limit: 48573)
     Memory: 532.1M (peak: 533.6M)
        CPU: 1min 34.685s
     CGroup: /system.slice/system-ceph\x2d14ff1f30\x2d5059\x2d58f1\x2d9a23\x2d69871bb275a1.slice/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mgr.compute-0.kzdpag.service
             ├─libpod-payload-cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea
             │ ├─75343 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.kzdpag -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─75345 /usr/bin/ceph-mgr -n mgr.compute-0.kzdpag -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─75341 /usr/bin/conmon --api-version 1 -c cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea -u cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea/userdata -p /run/containers/storage/overlay-containers/cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-mgr-compute-0-kzdpag --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mgr.compute-0.kzdpag.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea

Nov 29 08:17:15 compute-0 ceph-mgr[75345]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Nov 29 08:17:15 compute-0 ceph-mgr[75345]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Nov 29 08:17:15 compute-0 ceph-mgr[75345]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Nov 29 08:17:15 compute-0 ceph-mgr[75345]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Nov 29 08:17:15 compute-0 ceph-mgr[75345]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Nov 29 08:17:15 compute-0 ceph-mgr[75345]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Nov 29 08:17:15 compute-0 ceph-mgr[75345]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Nov 29 08:17:15 compute-0 ceph-mgr[75345]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Nov 29 08:17:15 compute-0 ceph-mgr[75345]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Nov 29 08:17:15 compute-0 ceph-mgr[75345]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)

● ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mon.compute-0.service - Ceph mon.compute-0 for 14ff1f30-5059-58f1-9a23-69871bb275a1
     Loaded: loaded (/etc/systemd/system/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-11-29 07:11:17 UTC; 1h 5min ago
   Main PID: 75046 (conmon)
         IO: 916.0K read, 452.1M written
      Tasks: 27 (limit: 48573)
     Memory: 97.0M (peak: 113.5M)
        CPU: 51.886s
     CGroup: /system.slice/system-ceph\x2d14ff1f30\x2d5059\x2d58f1\x2d9a23\x2d69871bb275a1.slice/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mon.compute-0.service
             ├─libpod-payload-21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90
             │ ├─75048 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─75050 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             └─runtime
               └─75046 /usr/bin/conmon --api-version 1 -c 21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90 -u 21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90/userdata -p /run/containers/storage/overlay-containers/21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90

Nov 29 08:17:14 compute-0 ceph-mon[75050]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "status", "format": "json-pretty"} v 0) v1
Nov 29 08:17:14 compute-0 ceph-mon[75050]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1072456681' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Nov 29 08:17:14 compute-0 ceph-mon[75050]: pgmap v2462: 305 pgs: 305 active+clean; 271 MiB data, 672 MiB used, 59 GiB / 60 GiB avail
Nov 29 08:17:14 compute-0 ceph-mon[75050]: from='client.19485 -' entity='client.admin' cmd=[{"prefix": "pg stat", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Nov 29 08:17:14 compute-0 ceph-mon[75050]: from='client.? 192.168.122.100:0/1072456681' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Nov 29 08:17:14 compute-0 ceph-mon[75050]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "time-sync-status", "format": "json-pretty"} v 0) v1
Nov 29 08:17:14 compute-0 ceph-mon[75050]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1810874851' entity='client.admin' cmd=[{"prefix": "time-sync-status", "format": "json-pretty"}]: dispatch
Nov 29 08:17:15 compute-0 ceph-mon[75050]: from='client.? 192.168.122.100:0/1810874851' entity='client.admin' cmd=[{"prefix": "time-sync-status", "format": "json-pretty"}]: dispatch
Nov 29 08:17:15 compute-0 ceph-mon[75050]: mon.compute-0@0(leader).osd e518 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 343932928 full_alloc: 348127232 kv_alloc: 318767104
Nov 29 08:17:16 compute-0 ceph-mon[75050]: pgmap v2463: 305 pgs: 305 active+clean; 271 MiB data, 672 MiB used, 59 GiB / 60 GiB avail

● ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.0.service - Ceph osd.0 for 14ff1f30-5059-58f1-9a23-69871bb275a1
     Loaded: loaded (/etc/systemd/system/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-11-29 07:13:03 UTC; 1h 4min ago
   Main PID: 88827 (conmon)
         IO: 486.3M read, 8.4G written
      Tasks: 60 (limit: 48573)
     Memory: 989.2M (peak: 1.3G)
        CPU: 58.894s
     CGroup: /system.slice/system-ceph\x2d14ff1f30\x2d5059\x2d58f1\x2d9a23\x2d69871bb275a1.slice/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.0.service
             ├─libpod-payload-9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96
             │ ├─88829 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─88831 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─88827 /usr/bin/conmon --api-version 1 -c 9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96 -u 9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96/userdata -p /run/containers/storage/overlay-containers/9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96

Nov 29 08:16:55 compute-0 ceph-osd[88831]: prioritycache tune_memory target: 4294967296 mapped: 210583552 unmapped: 68083712 heap: 278667264 old mem: 2845415832 new mem: 2845415832
Nov 29 08:16:55 compute-0 ceph-osd[88831]: monclient: tick
Nov 29 08:16:55 compute-0 ceph-osd[88831]: monclient: _check_auth_tickets
Nov 29 08:16:55 compute-0 ceph-osd[88831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-11-29T08:16:24.280790+0000)
Nov 29 08:16:55 compute-0 ceph-osd[88831]: prioritycache tune_memory target: 4294967296 mapped: 210632704 unmapped: 68034560 heap: 278667264 old mem: 2845415832 new mem: 2845415832
Nov 29 08:16:55 compute-0 ceph-osd[88831]: monclient: tick
Nov 29 08:16:55 compute-0 ceph-osd[88831]: monclient: _check_auth_tickets
Nov 29 08:16:55 compute-0 ceph-osd[88831]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-11-29T08:16:25.280941+0000)
Nov 29 08:16:55 compute-0 ceph-osd[88831]: osd.0 518 heartbeat osd_stat(store_statfs(0x4f1a61000/0x0/0x4ffc00000, data 0x3d005be/0x3fbc000, compress 0x0/0x0/0x0, omap 0x63a, meta 0xa1df9c6), peers [1,2] op hist [])
Nov 29 08:16:55 compute-0 ceph-osd[88831]: do_command 'log dump' '{prefix=log dump}'

● ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.1.service - Ceph osd.1 for 14ff1f30-5059-58f1-9a23-69871bb275a1
     Loaded: loaded (/etc/systemd/system/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-11-29 07:13:08 UTC; 1h 4min ago
   Main PID: 89836 (conmon)
         IO: 663.9M read, 9.0G written
      Tasks: 60 (limit: 48573)
     Memory: 939.2M (peak: 1.2G)
        CPU: 1min 566ms
     CGroup: /system.slice/system-ceph\x2d14ff1f30\x2d5059\x2d58f1\x2d9a23\x2d69871bb275a1.slice/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.1.service
             ├─libpod-payload-6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11
             │ ├─89838 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─89840 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─89836 /usr/bin/conmon --api-version 1 -c 6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11 -u 6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11/userdata -p /run/containers/storage/overlay-containers/6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11

Nov 29 08:16:49 compute-0 ceph-osd[89840]: bluestore.MempoolThread(0x55f9496e3b60) _resize_shards cache_size: 2845415832 kv_alloc: 1191182336 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1124073472 meta_used: 4091644 data_alloc: 251658240 data_used: 31293440
Nov 29 08:16:49 compute-0 ceph-osd[89840]: monclient: tick
Nov 29 08:16:49 compute-0 ceph-osd[89840]: monclient: _check_auth_tickets
Nov 29 08:16:49 compute-0 ceph-osd[89840]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-11-29T08:16:17.600737+0000)
Nov 29 08:16:49 compute-0 ceph-osd[89840]: prioritycache tune_memory target: 4294967296 mapped: 218865664 unmapped: 72089600 heap: 290955264 old mem: 2845415832 new mem: 2845415832
Nov 29 08:16:49 compute-0 ceph-osd[89840]: monclient: tick
Nov 29 08:16:49 compute-0 ceph-osd[89840]: monclient: _check_auth_tickets
Nov 29 08:16:49 compute-0 ceph-osd[89840]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-11-29T08:16:18.600874+0000)
Nov 29 08:16:49 compute-0 ceph-osd[89840]: prioritycache tune_memory target: 4294967296 mapped: 218292224 unmapped: 72663040 heap: 290955264 old mem: 2845415832 new mem: 2845415832
Nov 29 08:16:49 compute-0 ceph-osd[89840]: do_command 'log dump' '{prefix=log dump}'

● ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.2.service - Ceph osd.2 for 14ff1f30-5059-58f1-9a23-69871bb275a1
     Loaded: loaded (/etc/systemd/system/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-11-29 07:13:14 UTC; 1h 4min ago
   Main PID: 91076 (conmon)
         IO: 541.8M read, 7.6G written
      Tasks: 60 (limit: 48573)
     Memory: 799.0M (peak: 1.0G)
        CPU: 51.010s
     CGroup: /system.slice/system-ceph\x2d14ff1f30\x2d5059\x2d58f1\x2d9a23\x2d69871bb275a1.slice/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.2.service
             ├─libpod-payload-2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23
             │ ├─91078 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─91083 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─91076 /usr/bin/conmon --api-version 1 -c 2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23 -u 2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23/userdata -p /run/containers/storage/overlay-containers/2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23

Nov 29 08:16:44 compute-0 ceph-osd[91083]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Nov 29 08:16:44 compute-0 ceph-osd[91083]: bluestore.MempoolThread(0x560f3dd27b60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 3528870 data_alloc: 234881024 data_used: 18898944
Nov 29 08:16:44 compute-0 ceph-osd[91083]: monclient: tick
Nov 29 08:16:44 compute-0 ceph-osd[91083]: monclient: _check_auth_tickets
Nov 29 08:16:44 compute-0 ceph-osd[91083]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-11-29T08:16:13.469665+0000)
Nov 29 08:16:44 compute-0 ceph-osd[91083]: prioritycache tune_memory target: 4294967296 mapped: 193798144 unmapped: 37797888 heap: 231596032 old mem: 2845415832 new mem: 2845415832
Nov 29 08:16:44 compute-0 ceph-osd[91083]: monclient: tick
Nov 29 08:16:44 compute-0 ceph-osd[91083]: monclient: _check_auth_tickets
Nov 29 08:16:44 compute-0 ceph-osd[91083]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-11-29T08:16:14.469791+0000)
Nov 29 08:16:44 compute-0 ceph-osd[91083]: do_command 'log dump' '{prefix=log dump}'

● ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@rgw.rgw.compute-0.qxekyl.service - Ceph rgw.rgw.compute-0.qxekyl for 14ff1f30-5059-58f1-9a23-69871bb275a1
     Loaded: loaded (/etc/systemd/system/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-11-29 07:14:10 UTC; 1h 3min ago
   Main PID: 99619 (conmon)
         IO: 0B read, 209.5K written
      Tasks: 605 (limit: 48573)
     Memory: 98.8M (peak: 99.8M)
        CPU: 18.771s
     CGroup: /system.slice/system-ceph\x2d14ff1f30\x2d5059\x2d58f1\x2d9a23\x2d69871bb275a1.slice/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@rgw.rgw.compute-0.qxekyl.service
             ├─libpod-payload-4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf
             │ ├─99621 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.qxekyl -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─99623 /usr/bin/radosgw -n client.rgw.rgw.compute-0.qxekyl -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─99619 /usr/bin/conmon --api-version 1 -c 4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf -u 4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf/userdata -p /run/containers/storage/overlay-containers/4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-rgw-rgw-compute-0-qxekyl --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@rgw.rgw.compute-0.qxekyl.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf

Nov 29 07:14:10 compute-0 radosgw[99623]: framework conf key: endpoint, val: 192.168.122.100:8082
Nov 29 07:14:10 compute-0 radosgw[99623]: init_numa not setting numa affinity
Nov 29 07:14:29 compute-0 radosgw[99623]: LDAP not started since no server URIs were provided in the configuration.
Nov 29 07:14:29 compute-0 ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-rgw-rgw-compute-0-qxekyl[99619]: 2025-11-29T07:14:29.034+0000 7f0fe4bbb940 -1 LDAP not started since no server URIs were provided in the configuration.
Nov 29 07:14:29 compute-0 radosgw[99623]: framework: beast
Nov 29 07:14:29 compute-0 radosgw[99623]: framework conf key: ssl_certificate, val: config://rgw/cert/$realm/$zone.crt
Nov 29 07:14:29 compute-0 radosgw[99623]: framework conf key: ssl_private_key, val: config://rgw/cert/$realm/$zone.key
Nov 29 07:14:29 compute-0 radosgw[99623]: starting handler: beast
Nov 29 07:14:29 compute-0 radosgw[99623]: set uid:gid to 167:167 (ceph:ceph)
Nov 29 07:14:29 compute-0 radosgw[99623]: mgrc service_daemon_register rgw.14261 metadata {arch=x86_64,ceph_release=reef,ceph_version=ceph version 18.2.7 (6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad) reef (stable),ceph_version_short=18.2.7,container_hostname=compute-0,container_image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0,cpu=AMD EPYC-Rome Processor,distro=centos,distro_description=CentOS Stream 9,distro_version=9,frontend_config#0=beast endpoint=192.168.122.100:8082,frontend_type#0=beast,hostname=compute-0,id=rgw.compute-0.qxekyl,kernel_description=#1 SMP PREEMPT_DYNAMIC Thu Nov 20 14:15:03 UTC 2025,kernel_version=5.14.0-642.el9.x86_64,mem_swap_kb=1048572,mem_total_kb=7864328,num_handles=1,os=Linux,pid=2,realm_id=,realm_name=,zone_id=7ba03397-1943-4fb1-bfd5-2752d9c78a7d,zone_name=default,zonegroup_id=529147a5-95c6-4fc0-8a5f-d5e8d4efa3ee,zonegroup_name=default}

● ceph-osd-losetup-0.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-0.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-11-29 07:10:30 UTC; 1h 6min ago
   Main PID: 72494 (code=exited, status=0/SUCCESS)
        CPU: 19ms

Nov 29 07:10:30 compute-0 systemd[1]: Starting Ceph OSD losetup...
Nov 29 07:10:30 compute-0 bash[72495]: /dev/loop3: [64513]:4327939 (/var/lib/ceph-osd-0.img)
Nov 29 07:10:30 compute-0 systemd[1]: Finished Ceph OSD losetup.

● ceph-osd-losetup-1.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-1.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-11-29 07:10:35 UTC; 1h 6min ago
   Main PID: 72863 (code=exited, status=0/SUCCESS)
        CPU: 26ms

Nov 29 07:10:35 compute-0 systemd[1]: Starting Ceph OSD losetup...
Nov 29 07:10:35 compute-0 bash[72864]: /dev/loop4: [64513]:4327981 (/var/lib/ceph-osd-1.img)
Nov 29 07:10:35 compute-0 systemd[1]: Finished Ceph OSD losetup.

● ceph-osd-losetup-2.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-2.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-11-29 07:10:40 UTC; 1h 6min ago
   Main PID: 73234 (code=exited, status=0/SUCCESS)
        CPU: 21ms

Nov 29 07:10:40 compute-0 systemd[1]: Starting Ceph OSD losetup...
Nov 29 07:10:40 compute-0 bash[73235]: /dev/loop5: [64513]:4328581 (/var/lib/ceph-osd-2.img)
Nov 29 07:10:40 compute-0 systemd[1]: Finished Ceph OSD losetup.

● chronyd.service - NTP client/server
     Loaded: loaded (/usr/lib/systemd/system/chronyd.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-11-29 07:08:03 UTC; 1h 9min ago
       Docs: man:chronyd(8)
             man:chrony.conf(5)
   Main PID: 58530 (chronyd)
         IO: 0B read, 4.0K written
      Tasks: 1 (limit: 48573)
     Memory: 1008.0K (peak: 1.9M)
        CPU: 80ms
     CGroup: /system.slice/chronyd.service
             └─58530 /usr/sbin/chronyd -F 2

Nov 29 07:08:03 compute-0 systemd[1]: Starting NTP client/server...
Nov 29 07:08:03 compute-0 chronyd[58530]: chronyd version 4.8 starting (+CMDMON +REFCLOCK +RTC +PRIVDROP +SCFILTER +SIGND +NTS +SECHASH +IPV6 +DEBUG)
Nov 29 07:08:03 compute-0 chronyd[58530]: Frequency -26.814 +/- 0.204 ppm read from /var/lib/chrony/drift
Nov 29 07:08:03 compute-0 chronyd[58530]: Loaded seccomp filter (level 2)
Nov 29 07:08:03 compute-0 systemd[1]: Started NTP client/server.
Nov 29 07:10:13 compute-0 chronyd[58530]: Selected source 23.133.168.246 (pool.ntp.org)

● cloud-config.service - Cloud-init: Config Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-11-29 06:29:54 UTC; 1h 47min ago
   Main PID: 1004 (code=exited, status=0/SUCCESS)
        CPU: 437ms

Nov 29 06:29:53 np0005539576.novalocal systemd[1]: Starting Cloud-init: Config Stage...
Nov 29 06:29:53 np0005539576.novalocal cloud-init[1109]: Cloud-init v. 24.4-7.el9 running 'modules:config' at Sat, 29 Nov 2025 06:29:53 +0000. Up 109.09 seconds.
Nov 29 06:29:54 np0005539576.novalocal systemd[1]: Finished Cloud-init: Config Stage.

● cloud-final.service - Cloud-init: Final Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-final.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-11-29 06:29:54 UTC; 1h 47min ago
   Main PID: 1156 (code=exited, status=0/SUCCESS)
        CPU: 572ms

Nov 29 06:29:54 np0005539576.novalocal cloud-init[1303]: Cloud-init v. 24.4-7.el9 running 'modules:final' at Sat, 29 Nov 2025 06:29:54 +0000. Up 109.88 seconds.
Nov 29 06:29:54 np0005539576.novalocal cloud-init[1305]: #############################################################
Nov 29 06:29:54 np0005539576.novalocal cloud-init[1306]: -----BEGIN SSH HOST KEY FINGERPRINTS-----
Nov 29 06:29:54 np0005539576.novalocal cloud-init[1308]: 256 SHA256:FOEGy4Vcbz4IWVUQy3Bxd90RQ4JSvvRdFKnRGx7VPd8 root@np0005539576.novalocal (ECDSA)
Nov 29 06:29:54 np0005539576.novalocal cloud-init[1313]: 256 SHA256:40tvmkr1UiG4MclZfkgRbALvX3NXxk8iUML07FqpSWY root@np0005539576.novalocal (ED25519)
Nov 29 06:29:54 np0005539576.novalocal cloud-init[1321]: 3072 SHA256:r76hopve4egyyGoPv+kpaE1fXgF2pJbLMOHP32RvQ90 root@np0005539576.novalocal (RSA)
Nov 29 06:29:54 np0005539576.novalocal cloud-init[1322]: -----END SSH HOST KEY FINGERPRINTS-----
Nov 29 06:29:54 np0005539576.novalocal cloud-init[1324]: #############################################################
Nov 29 06:29:54 np0005539576.novalocal cloud-init[1303]: Cloud-init v. 24.4-7.el9 finished at Sat, 29 Nov 2025 06:29:54 +0000. Datasource DataSourceConfigDrive [net,ver=2][source=/dev/sr0].  Up 110.08 seconds
Nov 29 06:29:54 np0005539576.novalocal systemd[1]: Finished Cloud-init: Final Stage.

● cloud-init-local.service - Cloud-init: Local Stage (pre-network)
     Loaded: loaded (/usr/lib/systemd/system/cloud-init-local.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-11-29 06:29:45 UTC; 1h 47min ago
   Main PID: 783 (code=exited, status=0/SUCCESS)
        CPU: 749ms

Nov 29 06:29:44 localhost systemd[1]: Starting Cloud-init: Local Stage (pre-network)...
Nov 29 06:29:44 localhost cloud-init[842]: Cloud-init v. 24.4-7.el9 running 'init-local' at Sat, 29 Nov 2025 06:29:44 +0000. Up 99.96 seconds.
Nov 29 06:29:45 np0005539576.novalocal systemd[1]: Finished Cloud-init: Local Stage (pre-network).

● cloud-init.service - Cloud-init: Network Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-11-29 06:29:53 UTC; 1h 47min ago
   Main PID: 901 (code=exited, status=0/SUCCESS)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 2.2M (peak: 40.7M)
        CPU: 2.204s
     CGroup: /system.slice/cloud-init.service

Nov 29 06:29:53 np0005539576.novalocal cloud-init[923]: |     o.Bo..=   . |
Nov 29 06:29:53 np0005539576.novalocal cloud-init[923]: |      Oo+ o + . =|
Nov 29 06:29:53 np0005539576.novalocal cloud-init[923]: |     . + o o o =.|
Nov 29 06:29:53 np0005539576.novalocal cloud-init[923]: |      o S E = . .|
Nov 29 06:29:53 np0005539576.novalocal cloud-init[923]: |       + O B .   |
Nov 29 06:29:53 np0005539576.novalocal cloud-init[923]: |      . = =      |
Nov 29 06:29:53 np0005539576.novalocal cloud-init[923]: |     . . =.      |
Nov 29 06:29:53 np0005539576.novalocal cloud-init[923]: |      ..+o.      |
Nov 29 06:29:53 np0005539576.novalocal cloud-init[923]: +----[SHA256]-----+
Nov 29 06:29:53 np0005539576.novalocal systemd[1]: Finished Cloud-init: Network Stage.

○ cpupower.service - Configure CPU power related settings
     Loaded: loaded (/usr/lib/systemd/system/cpupower.service; disabled; preset: disabled)
     Active: inactive (dead)

Unit display-manager.service could not be found.

● crond.service - Command Scheduler
     Loaded: loaded (/usr/lib/systemd/system/crond.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-11-29 06:29:53 UTC; 1h 47min ago
   Main PID: 1011 (crond)
         IO: 176.0K read, 12.0K written
      Tasks: 1 (limit: 48573)
     Memory: 1.2M (peak: 4.9M)
        CPU: 224ms
     CGroup: /system.slice/crond.service
             └─1011 /usr/sbin/crond -n

Nov 29 07:18:01 compute-0 anacron[30860]: Job `cron.daily' started
Nov 29 07:18:01 compute-0 anacron[30860]: Job `cron.daily' terminated
Nov 29 07:38:01 compute-0 anacron[30860]: Job `cron.weekly' started
Nov 29 07:38:02 compute-0 anacron[30860]: Job `cron.weekly' terminated
Nov 29 07:58:01 compute-0 anacron[30860]: Job `cron.monthly' started
Nov 29 07:58:01 compute-0 anacron[30860]: Job `cron.monthly' terminated
Nov 29 07:58:01 compute-0 anacron[30860]: Normal exit (3 jobs run)
Nov 29 08:01:01 compute-0 CROND[287529]: (root) CMD (run-parts /etc/cron.hourly)
Nov 29 08:01:01 compute-0 run-parts[287538]: (/etc/cron.hourly) finished 0anacron
Nov 29 08:01:01 compute-0 CROND[287528]: (root) CMDEND (run-parts /etc/cron.hourly)

● dbus-broker.service - D-Bus System Message Bus
     Loaded: loaded (/usr/lib/systemd/system/dbus-broker.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
TriggeredBy: ● dbus.socket
       Docs: man:dbus-broker-launch(1)
   Main PID: 774 (dbus-broker-lau)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48573)
     Memory: 3.0M (peak: 3.7M)
        CPU: 7.659s
     CGroup: /system.slice/dbus-broker.service
             ├─774 /usr/bin/dbus-broker-launch --scope system --audit
             └─781 dbus-broker --log 4 --controller 9 --machine-id 1f988c78c563e12389ab342aced42dbb --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit

Nov 29 07:05:03 compute-0 dbus-broker-launch[774]: Noticed file-system modification, trigger reload.
Nov 29 07:05:03 compute-0 dbus-broker-launch[774]: Noticed file-system modification, trigger reload.
Nov 29 07:05:59 compute-0 dbus-broker-launch[781]: avc:  op=load_policy lsm=selinux seqno=9 res=1
Nov 29 07:06:15 compute-0 dbus-broker-launch[781]: avc:  op=load_policy lsm=selinux seqno=10 res=1
Nov 29 07:23:11 compute-0 dbus-broker-launch[781]: avc:  op=load_policy lsm=selinux seqno=11 res=1
Nov 29 07:28:00 compute-0 dbus-broker-launch[781]: avc:  op=load_policy lsm=selinux seqno=13 res=1
Nov 29 07:29:16 compute-0 dbus-broker-launch[781]: avc:  op=load_policy lsm=selinux seqno=14 res=1
Nov 29 07:29:32 compute-0 dbus-broker-launch[774]: Noticed file-system modification, trigger reload.
Nov 29 07:29:32 compute-0 dbus-broker-launch[774]: Noticed file-system modification, trigger reload.
Nov 29 07:31:36 compute-0 dbus-broker-launch[781]: avc:  op=load_policy lsm=selinux seqno=15 res=1

○ dm-event.service - Device-mapper event daemon
     Loaded: loaded (/usr/lib/systemd/system/dm-event.service; static)
     Active: inactive (dead)
TriggeredBy: ● dm-event.socket
       Docs: man:dmeventd(8)

○ dnf-makecache.service - dnf makecache
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.service; static)
     Active: inactive (dead) since Sat 2025-11-29 07:02:47 UTC; 1h 14min ago
TriggeredBy: ● dnf-makecache.timer
   Main PID: 33935 (code=exited, status=0/SUCCESS)
        CPU: 1.996s

Nov 29 07:02:45 compute-0 dnf[33935]: NFV SIG OpenvSwitch                             103 kB/s | 3.0 kB     00:00
Nov 29 07:02:45 compute-0 dnf[33935]: repo-setup-centos-appstream                     157 kB/s | 4.4 kB     00:00
Nov 29 07:02:46 compute-0 dnf[33935]: repo-setup-centos-baseos                         53 kB/s | 3.9 kB     00:00
Nov 29 07:02:46 compute-0 dnf[33935]: repo-setup-centos-highavailability              184 kB/s | 3.9 kB     00:00
Nov 29 07:02:46 compute-0 dnf[33935]: repo-setup-centos-powertools                    185 kB/s | 4.3 kB     00:00
Nov 29 07:02:46 compute-0 dnf[33935]: Extra Packages for Enterprise Linux 9 - x86_64   90 kB/s |  33 kB     00:00
Nov 29 07:02:47 compute-0 dnf[33935]: Metadata cache created.
Nov 29 07:02:47 compute-0 systemd[1]: dnf-makecache.service: Deactivated successfully.
Nov 29 07:02:47 compute-0 systemd[1]: Finished dnf makecache.
Nov 29 07:02:47 compute-0 systemd[1]: dnf-makecache.service: Consumed 1.996s CPU time.

○ dracut-cmdline.service - dracut cmdline hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-cmdline.service; static)
     Active: inactive (dead) since Sat 2025-11-29 06:29:41 UTC; 1h 47min ago
   Duration: 1min 33.833s
       Docs: man:dracut-cmdline.service(8)
             man:dracut.bootup(7)
   Main PID: 324 (code=exited, status=0/SUCCESS)
        CPU: 141ms

Nov 29 06:28:07 localhost systemd[1]: Starting dracut cmdline hook...
Nov 29 06:28:07 localhost dracut-cmdline[324]: dracut-9 dracut-057-102.git20250818.el9
Nov 29 06:28:07 localhost dracut-cmdline[324]: Using kernel command line parameters:    BOOT_IMAGE=(hd0,msdos1)/boot/vmlinuz-5.14.0-642.el9.x86_64 root=UUID=b277050f-8ace-464d-abb6-4c46d4c45253 ro console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=1G-2G:192M,2G-64G:256M,64G-:512M
Nov 29 06:28:07 localhost systemd[1]: Finished dracut cmdline hook.
Nov 29 06:29:41 localhost systemd[1]: dracut-cmdline.service: Deactivated successfully.
Nov 29 06:29:41 localhost systemd[1]: Stopped dracut cmdline hook.

○ dracut-initqueue.service - dracut initqueue hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-initqueue.service; static)
     Active: inactive (dead) since Sat 2025-11-29 06:29:41 UTC; 1h 47min ago
   Duration: 1min 32.688s
       Docs: man:dracut-initqueue.service(8)
             man:dracut.bootup(7)
   Main PID: 492 (code=exited, status=0/SUCCESS)
        CPU: 50ms

Nov 29 06:28:08 localhost systemd[1]: Starting dracut initqueue hook...
Nov 29 06:28:08 localhost systemd[1]: Finished dracut initqueue hook.
Nov 29 06:29:41 localhost systemd[1]: dracut-initqueue.service: Deactivated successfully.
Nov 29 06:29:41 localhost systemd[1]: Stopped dracut initqueue hook.

○ dracut-mount.service - dracut mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-mount.service; static)
     Active: inactive (dead) since Sat 2025-11-29 06:29:41 UTC; 1h 47min ago
   Duration: 203ms
       Docs: man:dracut-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 573 (code=exited, status=0/SUCCESS)
        CPU: 21ms

Nov 29 06:29:41 localhost systemd[1]: Starting dracut mount hook...
Nov 29 06:29:41 localhost systemd[1]: Finished dracut mount hook.
Nov 29 06:29:41 localhost systemd[1]: dracut-mount.service: Deactivated successfully.
Nov 29 06:29:41 localhost systemd[1]: Stopped dracut mount hook.

○ dracut-pre-mount.service - dracut pre-mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-mount.service; static)
     Active: inactive (dead) since Sat 2025-11-29 06:29:41 UTC; 1h 47min ago
   Duration: 1min 32.640s
       Docs: man:dracut-pre-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 549 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Nov 29 06:28:08 localhost systemd[1]: Starting dracut pre-mount hook...
Nov 29 06:28:08 localhost systemd[1]: Finished dracut pre-mount hook.
Nov 29 06:29:41 localhost systemd[1]: dracut-pre-mount.service: Deactivated successfully.
Nov 29 06:29:41 localhost systemd[1]: Stopped dracut pre-mount hook.

○ dracut-pre-pivot.service - dracut pre-pivot and cleanup hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-pivot.service; static)
     Active: inactive (dead) since Sat 2025-11-29 06:29:41 UTC; 1h 47min ago
   Duration: 39ms
       Docs: man:dracut-pre-pivot.service(8)
             man:dracut.bootup(7)
   Main PID: 579 (code=exited, status=0/SUCCESS)
        CPU: 123ms

Nov 29 06:29:41 localhost systemd[1]: Starting dracut pre-pivot and cleanup hook...
Nov 29 06:29:41 localhost systemd[1]: Finished dracut pre-pivot and cleanup hook.
Nov 29 06:29:41 localhost systemd[1]: dracut-pre-pivot.service: Deactivated successfully.
Nov 29 06:29:41 localhost systemd[1]: Stopped dracut pre-pivot and cleanup hook.

○ dracut-pre-trigger.service - dracut pre-trigger hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-trigger.service; static)
     Active: inactive (dead) since Sat 2025-11-29 06:29:41 UTC; 1h 47min ago
   Duration: 1min 33.359s
       Docs: man:dracut-pre-trigger.service(8)
             man:dracut.bootup(7)
   Main PID: 464 (code=exited, status=0/SUCCESS)
        CPU: 23ms

Nov 29 06:28:08 localhost systemd[1]: Starting dracut pre-trigger hook...
Nov 29 06:28:08 localhost systemd[1]: Finished dracut pre-trigger hook.
Nov 29 06:29:41 localhost systemd[1]: dracut-pre-trigger.service: Deactivated successfully.
Nov 29 06:29:41 localhost systemd[1]: Stopped dracut pre-trigger hook.

○ dracut-pre-udev.service - dracut pre-udev hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-udev.service; static)
     Active: inactive (dead) since Sat 2025-11-29 06:29:41 UTC; 1h 47min ago
   Duration: 1min 33.506s
       Docs: man:dracut-pre-udev.service(8)
             man:dracut.bootup(7)
   Main PID: 413 (code=exited, status=0/SUCCESS)
        CPU: 316ms

Nov 29 06:28:07 localhost systemd[1]: Starting dracut pre-udev hook...
Nov 29 06:28:07 localhost rpc.statd[441]: Version 2.5.4 starting
Nov 29 06:28:07 localhost rpc.statd[441]: Initializing NSM state
Nov 29 06:28:07 localhost rpc.idmapd[446]: Setting log level to 0
Nov 29 06:28:07 localhost systemd[1]: Finished dracut pre-udev hook.
Nov 29 06:29:41 localhost rpc.idmapd[446]: exiting on signal 15
Nov 29 06:29:41 localhost systemd[1]: dracut-pre-udev.service: Deactivated successfully.
Nov 29 06:29:41 localhost systemd[1]: Stopped dracut pre-udev hook.

○ dracut-shutdown-onfailure.service - Service executing upon dracut-shutdown failure to perform cleanup
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown-onfailure.service; static)
     Active: inactive (dead)
       Docs: man:dracut-shutdown.service(8)

● dracut-shutdown.service - Restore /run/initramfs on shutdown
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown.service; static)
     Active: active (exited) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
       Docs: man:dracut-shutdown.service(8)
   Main PID: 788 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Nov 29 06:29:44 localhost systemd[1]: Starting Restore /run/initramfs on shutdown...
Nov 29 06:29:44 localhost systemd[1]: Finished Restore /run/initramfs on shutdown.

● edpm-container-shutdown.service - EDPM Container Shutdown
     Loaded: loaded (/etc/systemd/system/edpm-container-shutdown.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2025-11-29 07:08:44 UTC; 1h 8min ago
       Docs: https://github.com/openstack-k8s-operators/docs
   Main PID: 61526 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Nov 29 07:08:44 compute-0 systemd[1]: Starting EDPM Container Shutdown...
Nov 29 07:08:44 compute-0 systemd[1]: Finished EDPM Container Shutdown.

○ edpm_libvirt_guests.service - Suspend libvirt Guests in edpm
     Loaded: loaded (/etc/systemd/system/edpm_libvirt_guests.service; enabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:libvirtd(8)
             https://libvirt.org

● edpm_multipathd.service - multipathd container
     Loaded: loaded (/etc/systemd/system/edpm_multipathd.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-11-29 07:35:57 UTC; 41min ago
   Main PID: 238041 (conmon)
         IO: 0B read, 102.0K written
      Tasks: 1 (limit: 48573)
     Memory: 668.0K (peak: 20.2M)
        CPU: 112ms
     CGroup: /system.slice/edpm_multipathd.service
             └─238041 /usr/bin/conmon --api-version 1 -c 53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f -u 53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f/userdata -p /run/containers/storage/overlay-containers/53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f/userdata/pidfile -n multipathd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f/userdata/oci-log --conmon-pidfile /run/multipathd.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only 
--exit-command-arg 53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f

Nov 29 07:35:57 compute-0 multipathd[238041]: + sudo kolla_copy_cacerts
Nov 29 07:35:57 compute-0 multipathd[238041]: + [[ ! -n '' ]]
Nov 29 07:35:57 compute-0 multipathd[238041]: + . kolla_extend_start
Nov 29 07:35:57 compute-0 multipathd[238041]: + echo 'Running command: '\''/usr/sbin/multipathd -d'\'''
Nov 29 07:35:57 compute-0 multipathd[238041]: Running command: '/usr/sbin/multipathd -d'
Nov 29 07:35:57 compute-0 multipathd[238041]: + umask 0022
Nov 29 07:35:57 compute-0 multipathd[238041]: + exec /usr/sbin/multipathd -d
Nov 29 07:35:57 compute-0 multipathd[238041]: 4073.142693 | --------start up--------
Nov 29 07:35:57 compute-0 multipathd[238041]: 4073.142706 | read /etc/multipath.conf
Nov 29 07:35:57 compute-0 multipathd[238041]: 4073.147690 | path checkers start up

● edpm_nova_compute.service - nova_compute container
     Loaded: loaded (/etc/systemd/system/edpm_nova_compute.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-11-29 07:38:37 UTC; 38min ago
    Process: 256702 ExecStart=/usr/bin/podman start nova_compute (code=exited, status=0/SUCCESS)
   Main PID: 256729 (conmon)
         IO: 0B read, 100.0K written
      Tasks: 1 (limit: 48573)
     Memory: 688.0K (peak: 21.0M)
        CPU: 1.439s
     CGroup: /system.slice/edpm_nova_compute.service
             └─256729 /usr/bin/conmon --api-version 1 -c c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809 -u c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809/userdata -p /run/containers/storage/overlay-containers/c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809

Nov 29 08:17:03 compute-0 nova_compute[256729]: 2025-11-29 08:17:03.543 256736 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Nov 29 08:17:04 compute-0 nova_compute[256729]: 2025-11-29 08:17:04.824 256736 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Nov 29 08:17:08 compute-0 nova_compute[256729]: 2025-11-29 08:17:08.544 256736 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Nov 29 08:17:09 compute-0 nova_compute[256729]: 2025-11-29 08:17:09.868 256736 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Nov 29 08:17:10 compute-0 nova_compute[256729]: 2025-11-29 08:17:10.143 256736 DEBUG oslo_service.periodic_task [None req-27de958c-c2ab-4243-8865-e92c1e69ab3e - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Nov 29 08:17:11 compute-0 nova_compute[256729]: 2025-11-29 08:17:11.147 256736 DEBUG oslo_service.periodic_task [None req-27de958c-c2ab-4243-8865-e92c1e69ab3e - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Nov 29 08:17:13 compute-0 nova_compute[256729]: 2025-11-29 08:17:13.546 256736 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Nov 29 08:17:14 compute-0 nova_compute[256729]: 2025-11-29 08:17:14.870 256736 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Nov 29 08:17:15 compute-0 nova_compute[256729]: 2025-11-29 08:17:15.148 256736 DEBUG oslo_service.periodic_task [None req-27de958c-c2ab-4243-8865-e92c1e69ab3e - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Nov 29 08:17:16 compute-0 nova_compute[256729]: 2025-11-29 08:17:16.149 256736 DEBUG oslo_service.periodic_task [None req-27de958c-c2ab-4243-8865-e92c1e69ab3e - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m

● edpm_ovn_controller.service - ovn_controller container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_controller.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-11-29 07:24:14 UTC; 53min ago
   Main PID: 153383 (conmon)
         IO: 0B read, 112.0K written
      Tasks: 1 (limit: 48573)
     Memory: 696.0K (peak: 18.3M)
        CPU: 279ms
     CGroup: /system.slice/edpm_ovn_controller.service
             └─153383 /usr/bin/conmon --api-version 1 -c 23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8 -u 23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8/userdata -p /run/containers/storage/overlay-containers/23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8

Nov 29 08:11:36 compute-0 ovn_controller[153383]: 2025-11-29T08:11:36Z|00076|pinctrl(ovn_pinctrl0)|WARN|DHCPREQUEST requested IP 10.100.0.10 does not match offer 10.100.0.14
Nov 29 08:11:36 compute-0 ovn_controller[153383]: 2025-11-29T08:11:36Z|00077|pinctrl(ovn_pinctrl0)|INFO|DHCPNAK fa:16:3e:d2:8f:05 10.100.0.14
Nov 29 08:11:36 compute-0 ovn_controller[153383]: 2025-11-29T08:11:36Z|00078|pinctrl(ovn_pinctrl0)|INFO|DHCPOFFER fa:16:3e:d2:8f:05 10.100.0.14
Nov 29 08:11:36 compute-0 ovn_controller[153383]: 2025-11-29T08:11:36Z|00079|pinctrl(ovn_pinctrl0)|INFO|DHCPACK fa:16:3e:d2:8f:05 10.100.0.14
Nov 29 08:11:51 compute-0 ovn_controller[153383]: 2025-11-29T08:11:51Z|00296|memory_trim|INFO|Detected inactivity (last active 30005 ms ago): trimming memory
Nov 29 08:11:54 compute-0 ovn_controller[153383]: 2025-11-29T08:11:54Z|00297|binding|INFO|Releasing lport 503caeb9-24dd-41d1-bcb9-da6866a4b3cd from this chassis (sb_readonly=0)
Nov 29 08:11:54 compute-0 ovn_controller[153383]: 2025-11-29T08:11:54Z|00298|binding|INFO|Setting lport 503caeb9-24dd-41d1-bcb9-da6866a4b3cd down in Southbound
Nov 29 08:11:54 compute-0 ovn_controller[153383]: 2025-11-29T08:11:54Z|00299|binding|INFO|Removing iface tap503caeb9-24 ovn-installed in OVS
Nov 29 08:12:43 compute-0 ovn_controller[153383]: 2025-11-29T08:12:43Z|00300|memory_trim|INFO|Detected inactivity (last active 30004 ms ago): trimming memory
Nov 29 08:15:07 compute-0 ovn_controller[153383]: 2025-11-29T08:15:07Z|00301|memory_trim|INFO|Detected inactivity (last active 30013 ms ago): trimming memory

● edpm_ovn_metadata_agent.service - ovn_metadata_agent container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_metadata_agent.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-11-29 07:25:58 UTC; 51min ago
   Main PID: 163632 (conmon)
         IO: 0B read, 138.0K written
      Tasks: 1 (limit: 48573)
     Memory: 732.0K (peak: 19.5M)
        CPU: 466ms
     CGroup: /system.slice/edpm_ovn_metadata_agent.service
             └─163632 /usr/bin/conmon --api-version 1 -c 8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e -u 8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e/userdata -p /run/containers/storage/overlay-containers/8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e

Nov 29 08:14:43 compute-0 ovn_metadata_agent[163632]: 2025-11-29 08:14:43.969 163655 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Chassis_Private, record=df234f2c-4343-4c91-861d-13d184c56aa0, col_values=(('external_ids', {'neutron:ovn-metadata-sb-cfg': '29'}),), if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89[00m
Nov 29 08:14:59 compute-0 ovn_metadata_agent[163632]: 2025-11-29 08:14:59.793 163655 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Nov 29 08:14:59 compute-0 ovn_metadata_agent[163632]: 2025-11-29 08:14:59.794 163655 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Nov 29 08:14:59 compute-0 ovn_metadata_agent[163632]: 2025-11-29 08:14:59.794 163655 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Nov 29 08:15:59 compute-0 ovn_metadata_agent[163632]: 2025-11-29 08:15:59.794 163655 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Nov 29 08:15:59 compute-0 ovn_metadata_agent[163632]: 2025-11-29 08:15:59.795 163655 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Nov 29 08:15:59 compute-0 ovn_metadata_agent[163632]: 2025-11-29 08:15:59.795 163655 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Unit hv_kvp_daemon.service could not be found.
Nov 29 08:16:59 compute-0 ovn_metadata_agent[163632]: 2025-11-29 08:16:59.795 163655 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Nov 29 08:16:59 compute-0 ovn_metadata_agent[163632]: 2025-11-29 08:16:59.797 163655 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Nov 29 08:16:59 compute-0 ovn_metadata_agent[163632]: 2025-11-29 08:16:59.797 163655 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m

○ emergency.service - Emergency Shell
     Loaded: loaded (/usr/lib/systemd/system/emergency.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

● getty@tty1.service - Getty on tty1
     Loaded: loaded (/usr/lib/systemd/system/getty@.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-11-29 06:29:53 UTC; 1h 47min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1013 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48573)
     Memory: 276.0K (peak: 756.0K)
        CPU: 11ms
     CGroup: /system.slice/system-getty.slice/getty@tty1.service
             └─1013 /sbin/agetty -o "-p -- \\u" --noclear - linux

Nov 29 06:29:53 np0005539576.novalocal systemd[1]: Started Getty on tty1.

● gssproxy.service - GSSAPI Proxy Daemon
     Loaded: loaded (/usr/lib/systemd/system/gssproxy.service; disabled; preset: disabled)
     Active: active (running) since Sat 2025-11-29 06:29:45 UTC; 1h 47min ago
   Main PID: 875 (gssproxy)
         IO: 0B read, 0B written
      Tasks: 6 (limit: 48573)
     Memory: 1.8M (peak: 3.1M)
        CPU: 19ms
     CGroup: /system.slice/gssproxy.service
             └─875 /usr/sbin/gssproxy -D

Nov 29 06:29:45 np0005539576.novalocal systemd[1]: Starting GSSAPI Proxy Daemon...
Nov 29 06:29:45 np0005539576.novalocal systemd[1]: Started GSSAPI Proxy Daemon.

○ import-state.service - Import network configuration from initramfs
     Loaded: loaded (/usr/lib/systemd/system/import-state.service; enabled; preset: enabled)
     Active: inactive (dead)

○ initrd-cleanup.service - Cleaning Up and Shutting Down Daemons
     Loaded: loaded (/usr/lib/systemd/system/initrd-cleanup.service; static)
     Active: inactive (dead) since Sat 2025-11-29 06:29:41 UTC; 1h 47min ago
   Main PID: 621 (code=exited, status=0/SUCCESS)
        CPU: 18ms

Nov 29 06:29:41 localhost systemd[1]: Starting Cleaning Up and Shutting Down Daemons...
Nov 29 06:29:41 localhost systemd[1]: initrd-cleanup.service: Deactivated successfully.
Nov 29 06:29:41 localhost systemd[1]: Finished Cleaning Up and Shutting Down Daemons.

○ initrd-parse-etc.service - Mountpoints Configured in the Real Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-parse-etc.service; static)
     Active: inactive (dead) since Sat 2025-11-29 06:29:41 UTC; 1h 47min ago
   Main PID: 571 (code=exited, status=0/SUCCESS)
        CPU: 21ms

Nov 29 06:29:41 localhost systemd[1]: Starting Mountpoints Configured in the Real Root...
Nov 29 06:29:41 localhost systemd[1]: initrd-parse-etc.service: Deactivated successfully.
Nov 29 06:29:41 localhost systemd[1]: Finished Mountpoints Configured in the Real Root.

○ initrd-switch-root.service - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.service; static)
     Active: inactive (dead) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
   Main PID: 625 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Nov 29 06:29:41 localhost systemd[1]: Starting Switch Root...

○ initrd-udevadm-cleanup-db.service - Cleanup udev Database
     Loaded: loaded (/usr/lib/systemd/system/initrd-udevadm-cleanup-db.service; static)
     Active: inactive (dead) since Sat 2025-11-29 06:29:41 UTC; 1h 47min ago
   Main PID: 624 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Nov 29 06:29:41 localhost systemd[1]: Starting Cleanup udev Database...
Nov 29 06:29:41 localhost systemd[1]: initrd-udevadm-cleanup-db.service: Deactivated successfully.
Nov 29 06:29:41 localhost systemd[1]: Finished Cleanup udev Database.

○ ip6tables.service - IPv6 firewall with ip6tables
     Loaded: loaded (/usr/lib/systemd/system/ip6tables.service; disabled; preset: disabled)
     Active: inactive (dead)

○ iptables.service - IPv4 firewall with iptables
     Loaded: loaded (/usr/lib/systemd/system/iptables.service; disabled; preset: disabled)
     Active: inactive (dead) since Sat 2025-11-29 07:08:53 UTC; 1h 8min ago
   Duration: 39min 8.961s
   Main PID: 797 (code=exited, status=0/SUCCESS)
        CPU: 97ms

Nov 29 06:29:44 localhost systemd[1]: Starting IPv4 firewall with iptables...
Nov 29 06:29:44 localhost iptables.init[797]: iptables: Applying firewall rules: [  OK  ]
Nov 29 06:29:44 localhost systemd[1]: Finished IPv4 firewall with iptables.
Nov 29 07:08:53 compute-0 systemd[1]: Stopping IPv4 firewall with iptables...
Nov 29 07:08:53 compute-0 iptables.init[62778]: iptables: Setting chains to policy ACCEPT: raw mangle filter nat [  OK  ]
Nov 29 07:08:53 compute-0 iptables.init[62778]: iptables: Flushing firewall rules: [  OK  ]
Nov 29 07:08:53 compute-0 systemd[1]: iptables.service: Deactivated successfully.
Nov 29 07:08:53 compute-0 systemd[1]: Stopped IPv4 firewall with iptables.

● irqbalance.service - irqbalance daemon
     Loaded: loaded (/usr/lib/systemd/system/irqbalance.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
       Docs: man:irqbalance(1)
             https://github.com/Irqbalance/irqbalance
   Main PID: 804 (irqbalance)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48573)
     Memory: 1.1M (peak: 1.6M)
        CPU: 600ms
     CGroup: /system.slice/irqbalance.service
             └─804 /usr/sbin/irqbalance

Nov 29 06:29:54 np0005539576.novalocal irqbalance[804]: Cannot change IRQ 32 affinity: Operation not permitted
Nov 29 06:29:54 np0005539576.novalocal irqbalance[804]: IRQ 32 affinity is now unmanaged
Nov 29 06:29:54 np0005539576.novalocal irqbalance[804]: Cannot change IRQ 30 affinity: Operation not permitted
Nov 29 06:29:54 np0005539576.novalocal irqbalance[804]: IRQ 30 affinity is now unmanaged
Nov 29 06:29:54 np0005539576.novalocal irqbalance[804]: Cannot change IRQ 29 affinity: Operation not permitted
Nov 29 06:29:54 np0005539576.novalocal irqbalance[804]: IRQ 29 affinity is now unmanaged
Nov 29 06:42:34 np0005539576.novalocal irqbalance[804]: Cannot change IRQ 27 affinity: Operation not permitted
Nov 29 06:42:34 np0005539576.novalocal irqbalance[804]: IRQ 27 affinity is now unmanaged
Nov 29 07:02:04 compute-0 irqbalance[804]: Cannot change IRQ 26 affinity: Operation not permitted
Nov 29 07:02:04 compute-0 irqbalance[804]: IRQ 26 affinity is now unmanaged

○ iscsi-init.service - One time configuration for iscsi.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-init.service; disabled; preset: disabled)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-11-29 07:36:10 UTC; 41min ago

Nov 29 07:33:46 compute-0 systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).
Nov 29 07:36:10 compute-0 systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).

○ iscsi-onboot.service - Special handling of early boot iSCSI sessions
     Loaded: loaded (/usr/lib/systemd/system/iscsi-onboot.service; enabled; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsi-shutdown.service - Logout off all iSCSI sessions on shutdown
     Loaded: loaded (/usr/lib/systemd/system/iscsi-shutdown.service; static)
     Active: active (exited) since Sat 2025-11-29 07:33:46 UTC; 43min ago
       Docs: man:iscsid(8)
             man:iscsiadm(8)
   Main PID: 227940 (code=exited, status=0/SUCCESS)
        CPU: 4ms

Nov 29 07:33:46 compute-0 systemd[1]: Starting Logout off all iSCSI sessions on shutdown...
Nov 29 07:33:46 compute-0 systemd[1]: Finished Logout off all iSCSI sessions on shutdown.

○ iscsi-starter.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-starter.service; enabled; preset: disabled)
     Active: inactive (dead)

○ iscsi.service - Login and scanning of iSCSI devices
     Loaded: loaded (/usr/lib/systemd/system/iscsi.service; indirect; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsid.service - Open-iSCSI
     Loaded: loaded (/usr/lib/systemd/system/iscsid.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-11-29 07:36:10 UTC; 41min ago
TriggeredBy: ● iscsid.socket
       Docs: man:iscsid(8)
             man:iscsiuio(8)
             man:iscsiadm(8)
   Main PID: 240752 (iscsid)
     Status: "Ready to process requests"
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48573)
     Memory: 1.9M (peak: 2.0M)
        CPU: 8ms
     CGroup: /system.slice/iscsid.service
             └─240752 /usr/sbin/iscsid -f

Nov 29 07:36:10 compute-0 systemd[1]: Starting Open-iSCSI...
Nov 29 07:36:10 compute-0 systemd[1]: Started Open-iSCSI.

○ iscsiuio.service - iSCSI UserSpace I/O driver
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ iscsiuio.socket
       Docs: man:iscsiuio(8)

● kdump.service - Crash recovery kernel arming
     Loaded: loaded (/usr/lib/systemd/system/kdump.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2025-11-29 06:30:05 UTC; 1h 47min ago
   Main PID: 1010 (code=exited, status=0/SUCCESS)
        CPU: 18.670s

Nov 29 06:30:04 np0005539576.novalocal dracut[1289]: Linked:         0 files
Nov 29 06:30:04 np0005539576.novalocal dracut[1289]: Compared:       0 xattrs
Nov 29 06:30:04 np0005539576.novalocal dracut[1289]: Compared:       0 files
Nov 29 06:30:04 np0005539576.novalocal dracut[1289]: Saved:          0 B
Nov 29 06:30:04 np0005539576.novalocal dracut[1289]: Duration:       0.000403 seconds
Nov 29 06:30:04 np0005539576.novalocal dracut[1289]: *** Hardlinking files done ***
Nov 29 06:30:04 np0005539576.novalocal dracut[1289]: *** Creating initramfs image file '/boot/initramfs-5.14.0-642.el9.x86_64kdump.img' done ***
Nov 29 06:30:05 np0005539576.novalocal kdumpctl[1018]: kdump: kexec: loaded kdump kernel
Nov 29 06:30:05 np0005539576.novalocal kdumpctl[1018]: kdump: Starting kdump: [OK]
Nov 29 06:30:05 np0005539576.novalocal systemd[1]: Finished Crash recovery kernel arming.

● kmod-static-nodes.service - Create List of Static Device Nodes
     Loaded: loaded (/usr/lib/systemd/system/kmod-static-nodes.service; static)
     Active: active (exited) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
   Main PID: 675 (code=exited, status=0/SUCCESS)
        CPU: 8ms

● ldconfig.service - Rebuild Dynamic Linker Cache
     Loaded: loaded (/usr/lib/systemd/system/ldconfig.service; static)
     Active: active (exited) since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
       Docs: man:ldconfig(8)
   Main PID: 697 (code=exited, status=0/SUCCESS)
        CPU: 61ms

Nov 29 06:29:43 localhost systemd[1]: Starting Rebuild Dynamic Linker Cache...
Nov 29 06:29:43 localhost systemd[1]: Finished Rebuild Dynamic Linker Cache.

○ libvirtd.service
     Loaded: masked (Reason: Unit libvirtd.service is masked.)
     Active: inactive (dead)
TriggeredBy: ○ libvirtd-ro.socket
             ○ libvirtd-admin.socket
             ○ libvirtd.socket

○ loadmodules.service - Load legacy module configuration
     Loaded: loaded (/usr/lib/systemd/system/loadmodules.service; enabled; preset: enabled)
     Active: inactive (dead)

○ logrotate.service - Rotate log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.service; static)
     Active: inactive (dead)
TriggeredBy: ● logrotate.timer
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

○ lvm2-lvmpolld.service - LVM2 poll daemon
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.service; static)
     Active: inactive (dead)
TriggeredBy: ● lvm2-lvmpolld.socket
       Docs: man:lvmpolld(8)

● lvm2-monitor.service - Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling
     Loaded: loaded (/usr/lib/systemd/system/lvm2-monitor.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2025-11-29 07:02:44 UTC; 1h 14min ago
       Docs: man:dmeventd(8)
             man:lvcreate(8)
             man:lvchange(8)
             man:vgchange(8)
   Main PID: 33990 (code=exited, status=0/SUCCESS)
        CPU: 24ms

Nov 29 07:02:44 compute-0 systemd[1]: Starting Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling...
Nov 29 07:02:44 compute-0 systemd[1]: Finished Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling.

○ mdmonitor.service - Software RAID monitoring and management
     Loaded: loaded (/usr/lib/systemd/system/mdmonitor.service; enabled; preset: enabled)
     Active: inactive (dead)

○ microcode.service - Load CPU microcode update
     Loaded: loaded (/usr/lib/systemd/system/microcode.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-11-29 06:29:44 UTC; 1h 47min ago

Nov 29 06:29:44 localhost systemd[1]: Load CPU microcode update was skipped because of an unmet condition check (ConditionPathExists=/sys/devices/system/cpu/microcode/reload).

○ modprobe@configfs.service - Load Kernel Module configfs
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
       Docs: man:modprobe(8)
   Main PID: 760 (code=exited, status=0/SUCCESS)
        CPU: 5ms

Nov 29 06:29:43 localhost systemd[1]: Starting Load Kernel Module configfs...
Nov 29 06:29:44 localhost systemd[1]: modprobe@configfs.service: Deactivated successfully.
Nov 29 06:29:44 localhost systemd[1]: Finished Load Kernel Module configfs.

○ modprobe@drm.service - Load Kernel Module drm
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
       Docs: man:modprobe(8)
   Main PID: 677 (code=exited, status=0/SUCCESS)
        CPU: 85ms

Nov 29 06:29:42 localhost systemd[1]: modprobe@drm.service: Deactivated successfully.
Nov 29 06:29:42 localhost systemd[1]: Finished Load Kernel Module drm.

○ modprobe@efi_pstore.service - Load Kernel Module efi_pstore
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
       Docs: man:modprobe(8)
   Main PID: 678 (code=exited, status=0/SUCCESS)
        CPU: 6ms

Nov 29 06:29:42 localhost systemd[1]: modprobe@efi_pstore.service: Deactivated successfully.
Nov 29 06:29:42 localhost systemd[1]: Finished Load Kernel Module efi_pstore.

○ modprobe@fuse.service - Load Kernel Module fuse
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
       Docs: man:modprobe(8)
   Main PID: 679 (code=exited, status=0/SUCCESS)
        CPU: 40ms

Nov 29 06:29:42 localhost systemd[1]: modprobe@fuse.service: Deactivated successfully.
Nov 29 06:29:42 localhost systemd[1]: Finished Load Kernel Module fuse.

○ netns-placeholder.service - Create netns directory
     Loaded: loaded (/etc/systemd/system/netns-placeholder.service; enabled; preset: enabled)
     Active: inactive (dead) since Sat 2025-11-29 07:34:28 UTC; 42min ago
   Main PID: 233947 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Nov 29 07:34:28 compute-0 systemd[1]: Starting Create netns directory...
Nov 29 07:34:28 compute-0 systemd[1]: netns-placeholder.service: Deactivated successfully.
Nov 29 07:34:28 compute-0 systemd[1]: Finished Create netns directory.

○ network.service - LSB: Bring up/down networking
     Loaded: loaded (/etc/rc.d/init.d/network; generated)
     Active: inactive (dead)
       Docs: man:systemd-sysv-generator(8)

● NetworkManager-wait-online.service - Network Manager Wait Online
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager-wait-online.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-11-29 07:06:26 UTC; 1h 10min ago
       Docs: man:NetworkManager-wait-online.service(8)
   Main PID: 48981 (code=exited, status=0/SUCCESS)
        CPU: 19ms

Nov 29 07:06:26 compute-0 systemd[1]: Starting Network Manager Wait Online...
Nov 29 07:06:26 compute-0 systemd[1]: Finished Network Manager Wait Online.

● NetworkManager.service - Network Manager
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager.service; enabled; preset: enabled)
    Drop-In: /usr/lib/systemd/system/NetworkManager.service.d
             └─NetworkManager-ovs.conf
     Active: active (running) since Sat 2025-11-29 07:06:26 UTC; 1h 10min ago
       Docs: man:NetworkManager(8)
   Main PID: 48962 (NetworkManager)
         IO: 104.0K read, 258.0K written
      Tasks: 3 (limit: 48573)
     Memory: 5.6M (peak: 6.3M)
        CPU: 34.042s
     CGroup: /system.slice/NetworkManager.service
             └─48962 /usr/sbin/NetworkManager --no-daemon

Nov 29 08:11:13 compute-0 NetworkManager[48962]: <info>  [1764403873.3171] manager: (tap503caeb9-24): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/144)
Nov 29 08:11:14 compute-0 NetworkManager[48962]: <info>  [1764403874.4006] manager: (tap503caeb9-24): new Tun device (/org/freedesktop/NetworkManager/Devices/145)
Nov 29 08:11:14 compute-0 NetworkManager[48962]: <info>  [1764403874.5047] device (tap503caeb9-24): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Nov 29 08:11:14 compute-0 NetworkManager[48962]: <info>  [1764403874.5055] device (tap503caeb9-24): state change: unavailable -> disconnected (reason 'none', managed-type: 'external')
Nov 29 08:11:14 compute-0 NetworkManager[48962]: <info>  [1764403874.7904] manager: (tapdda88d46-90): new Veth device (/org/freedesktop/NetworkManager/Devices/146)
Nov 29 08:11:14 compute-0 NetworkManager[48962]: <info>  [1764403874.8583] device (tapdda88d46-90): carrier: link connected
Nov 29 08:11:15 compute-0 NetworkManager[48962]: <info>  [1764403875.0223] manager: (tapdda88d46-90): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/147)
Nov 29 08:11:21 compute-0 NetworkManager[48962]: <info>  [1764403881.1862] manager: (patch-provnet-53893d16-43ff-4c9d-aa40-6eb91dbe033a-to-br-int): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/148)
Nov 29 08:11:21 compute-0 NetworkManager[48962]: <info>  [1764403881.1880] manager: (patch-br-int-to-provnet-53893d16-43ff-4c9d-aa40-6eb91dbe033a): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/149)
Nov 29 08:11:54 compute-0 NetworkManager[48962]: <info>  [1764403914.2551] device (tap503caeb9-24): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')

○ nfs-idmapd.service - NFSv4 ID-name mapping service
     Loaded: loaded (/usr/lib/systemd/system/nfs-idmapd.service; static)
     Active: inactive (dead)
       Docs: man:idmapd(8)

○ nfs-mountd.service - NFS Mount Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfs-mountd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.mountd(8)

○ nfs-server.service - NFS server and services
     Loaded: loaded (/usr/lib/systemd/system/nfs-server.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:rpc.nfsd(8)
             man:exportfs(8)
Unit ntpd.service could not be found.
Unit ntpdate.service could not be found.

○ nfs-utils.service - NFS server and client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-utils.service; static)
     Active: inactive (dead)

○ nfsdcld.service - NFSv4 Client Tracking Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfsdcld.service; static)
     Active: inactive (dead)
       Docs: man:nfsdcld(8)

● nftables.service - Netfilter Tables
     Loaded: loaded (/usr/lib/systemd/system/nftables.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-11-29 07:08:57 UTC; 1h 8min ago
       Docs: man:nft(8)
   Main PID: 63169 (code=exited, status=0/SUCCESS)
        CPU: 38ms

Nov 29 07:08:57 compute-0 systemd[1]: Starting Netfilter Tables...
Nov 29 07:08:57 compute-0 systemd[1]: Finished Netfilter Tables.

● nis-domainname.service - Read and set NIS domainname from /etc/sysconfig/network
     Loaded: loaded (/usr/lib/systemd/system/nis-domainname.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
   Main PID: 680 (code=exited, status=0/SUCCESS)
        CPU: 5ms

Nov 29 06:29:42 localhost systemd[1]: Finished Read and set NIS domainname from /etc/sysconfig/network.

○ nvmefc-boot-connections.service - Auto-connect to subsystems on FC-NVME devices found during boot
     Loaded: loaded (/usr/lib/systemd/system/nvmefc-boot-connections.service; enabled; preset: enabled)
     Active: inactive (dead)

● openvswitch.service - Open vSwitch
     Loaded: loaded (/usr/lib/systemd/system/openvswitch.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-11-29 07:06:07 UTC; 1h 11min ago
   Main PID: 47274 (code=exited, status=0/SUCCESS)
        CPU: 2ms

Nov 29 07:06:07 compute-0 systemd[1]: Starting Open vSwitch...
Nov 29 07:06:07 compute-0 systemd[1]: Finished Open vSwitch.

● ovs-delete-transient-ports.service - Open vSwitch Delete Transient Ports
     Loaded: loaded (/usr/lib/systemd/system/ovs-delete-transient-ports.service; static)
     Active: active (exited) since Sat 2025-11-29 07:06:07 UTC; 1h 11min ago
   Main PID: 47211 (code=exited, status=0/SUCCESS)
        CPU: 27ms

Nov 29 07:06:07 compute-0 systemd[1]: Starting Open vSwitch Delete Transient Ports...
Nov 29 07:06:07 compute-0 systemd[1]: Finished Open vSwitch Delete Transient Ports.

● ovs-vswitchd.service - Open vSwitch Forwarding Unit
     Loaded: loaded (/usr/lib/systemd/system/ovs-vswitchd.service; static)
     Active: active (running) since Sat 2025-11-29 07:06:07 UTC; 1h 11min ago
   Main PID: 47265 (ovs-vswitchd)
         IO: 3.4M read, 476.0K written
      Tasks: 13 (limit: 48573)
     Memory: 246.2M (peak: 251.4M)
        CPU: 23.202s
     CGroup: /system.slice/ovs-vswitchd.service
             └─47265 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach

Nov 29 07:06:07 compute-0 systemd[1]: Starting Open vSwitch Forwarding Unit...
Nov 29 07:06:07 compute-0 ovs-ctl[47255]: Inserting openvswitch module [  OK  ]
Nov 29 07:06:07 compute-0 ovs-ctl[47224]: Starting ovs-vswitchd [  OK  ]
Nov 29 07:06:07 compute-0 ovs-vsctl[47272]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0
Nov 29 07:06:07 compute-0 ovs-ctl[47224]: Enabling remote OVSDB managers [  OK  ]
Nov 29 07:06:07 compute-0 systemd[1]: Started Open vSwitch Forwarding Unit.

● ovsdb-server.service - Open vSwitch Database Unit
     Loaded: loaded (/usr/lib/systemd/system/ovsdb-server.service; static)
     Active: active (running) since Sat 2025-11-29 07:06:06 UTC; 1h 11min ago
   Main PID: 47183 (ovsdb-server)
         IO: 1.2M read, 784.5K written
      Tasks: 1 (limit: 48573)
     Memory: 4.9M (peak: 41.0M)
        CPU: 20.190s
     CGroup: /system.slice/ovsdb-server.service
             └─47183 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
Unit pacemaker.service could not be found.
Unit plymouth-quit-wait.service could not be found.
Unit plymouth-start.service could not be found.
Unit power-profiles-daemon.service could not be found.

Nov 29 07:06:05 compute-0 chown[47130]: /usr/bin/chown: cannot access '/run/openvswitch': No such file or directory
Nov 29 07:06:06 compute-0 ovs-ctl[47135]: /etc/openvswitch/conf.db does not exist ... (warning).
Nov 29 07:06:06 compute-0 ovs-ctl[47135]: Creating empty database /etc/openvswitch/conf.db [  OK  ]
Nov 29 07:06:06 compute-0 ovs-ctl[47135]: Starting ovsdb-server [  OK  ]
Nov 29 07:06:06 compute-0 ovs-vsctl[47184]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait -- init -- set Open_vSwitch . db-version=8.5.1
Nov 29 07:06:06 compute-0 ovs-vsctl[47200]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait set Open_vSwitch . ovs-version=3.3.5-115.el9s "external-ids:system-id=\"df234f2c-4343-4c91-861d-13d184c56aa0\"" "external-ids:rundir=\"/var/run/openvswitch\"" "system-type=\"centos\"" "system-version=\"9\""
Nov 29 07:06:06 compute-0 ovs-ctl[47135]: Configuring Open vSwitch system IDs [  OK  ]
Nov 29 07:06:06 compute-0 ovs-vsctl[47210]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0
Nov 29 07:06:06 compute-0 ovs-ctl[47135]: Enabling remote OVSDB managers [  OK  ]
Nov 29 07:06:06 compute-0 systemd[1]: Started Open vSwitch Database Unit.

● polkit.service - Authorization Manager
     Loaded: loaded (/usr/lib/systemd/system/polkit.service; static)
     Active: active (running) since Sat 2025-11-29 07:05:09 UTC; 1h 12min ago
       Docs: man:polkit(8)
   Main PID: 43449 (polkitd)
         IO: 19.2M read, 0B written
      Tasks: 12 (limit: 48573)
     Memory: 25.0M (peak: 26.7M)
        CPU: 2.315s
     CGroup: /system.slice/polkit.service
             └─43449 /usr/lib/polkit-1/polkitd --no-debug

Nov 29 07:29:52 compute-0 polkitd[43449]: Collecting garbage unconditionally...
Nov 29 07:29:52 compute-0 polkitd[43449]: Loading rules from directory /etc/polkit-1/rules.d
Nov 29 07:29:52 compute-0 polkitd[43449]: Loading rules from directory /usr/share/polkit-1/rules.d
Nov 29 07:29:52 compute-0 polkitd[43449]: Finished loading, compiling and executing 3 rules
Nov 29 07:32:08 compute-0 polkitd[43449]: Registered Authentication Agent for unix-process:219056:384393 (system bus name :1.2883 [pkttyagent --process 219056 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Nov 29 07:32:08 compute-0 polkitd[43449]: Unregistered Authentication Agent for unix-process:219056:384393 (system bus name :1.2883, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Nov 29 07:32:08 compute-0 polkitd[43449]: Registered Authentication Agent for unix-process:219055:384393 (system bus name :1.2884 [pkttyagent --process 219055 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Nov 29 07:32:08 compute-0 polkitd[43449]: Unregistered Authentication Agent for unix-process:219055:384393 (system bus name :1.2884, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Nov 29 07:32:10 compute-0 polkitd[43449]: Registered Authentication Agent for unix-process:219524:384594 (system bus name :1.2887 [pkttyagent --process 219524 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Nov 29 07:32:10 compute-0 polkitd[43449]: Unregistered Authentication Agent for unix-process:219524:384594 (system bus name :1.2887, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)

○ raid-check.service - RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.service; static)
     Active: inactive (dead)
TriggeredBy: ○ raid-check.timer
Unit rpc-svcgssd.service could not be found.

○ rc-local.service - /etc/rc.d/rc.local Compatibility
     Loaded: loaded (/usr/lib/systemd/system/rc-local.service; static)
     Active: inactive (dead)
       Docs: man:systemd-rc-local-generator(8)

○ rescue.service - Rescue Shell
     Loaded: loaded (/usr/lib/systemd/system/rescue.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

○ rpc-gssd.service - RPC security service for NFS client and server
     Loaded: loaded (/usr/lib/systemd/system/rpc-gssd.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-11-29 06:29:45 UTC; 1h 47min ago
       Docs: man:rpc.gssd(8)

Nov 29 06:29:45 np0005539576.novalocal systemd[1]: RPC security service for NFS client and server was skipped because of an unmet condition check (ConditionPathExists=/etc/krb5.keytab).

● rpc-statd-notify.service - Notify NFS peers of a restart
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd-notify.service; static)
     Active: active (exited) since Sat 2025-11-29 06:29:53 UTC; 1h 47min ago
       Docs: man:sm-notify(8)
             man:rpc.statd(8)
        CPU: 6ms

Nov 29 06:29:53 np0005539576.novalocal systemd[1]: Starting Notify NFS peers of a restart...
Nov 29 06:29:53 np0005539576.novalocal sm-notify[1006]: Version 2.5.4 starting
Nov 29 06:29:53 np0005539576.novalocal systemd[1]: Started Notify NFS peers of a restart.

○ rpc-statd.service - NFS status monitor for NFSv2/3 locking.
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.statd(8)

● rpcbind.service - RPC Bind
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
TriggeredBy: ● rpcbind.socket
       Docs: man:rpcbind(8)
   Main PID: 702 (rpcbind)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48573)
     Memory: 2.3M (peak: 2.6M)
        CPU: 38ms
     CGroup: /system.slice/rpcbind.service
             └─702 /usr/bin/rpcbind -w -f

Nov 29 06:29:43 localhost systemd[1]: Starting RPC Bind...
Nov 29 06:29:43 localhost systemd[1]: Started RPC Bind.

● rsyslog.service - System Logging Service
     Loaded: loaded (/usr/lib/systemd/system/rsyslog.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-11-29 06:29:53 UTC; 1h 47min ago
       Docs: man:rsyslogd(8)
             https://www.rsyslog.com/doc/
   Main PID: 1007 (rsyslogd)
         IO: 4.0K read, 26.4M written
      Tasks: 3 (limit: 48573)
     Memory: 22.7M (peak: 23.4M)
        CPU: 15.966s
     CGroup: /system.slice/rsyslog.service
             └─1007 /usr/sbin/rsyslogd -n

Nov 29 07:38:21 compute-0 rsyslogd[1007]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Nov 29 07:45:44 compute-0 rsyslogd[1007]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Nov 29 07:56:57 compute-0 rsyslogd[1007]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Nov 29 07:56:57 compute-0 rsyslogd[1007]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Nov 29 08:06:19 compute-0 rsyslogd[1007]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Nov 29 08:06:19 compute-0 rsyslogd[1007]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Nov 29 08:16:44 compute-0 rsyslogd[1007]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Nov 29 08:16:49 compute-0 rsyslogd[1007]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Nov 29 08:16:49 compute-0 rsyslogd[1007]: imjournal from <np0005539576:ceph-osd>: begin to drop messages due to rate-limiting
Nov 29 08:16:55 compute-0 rsyslogd[1007]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Unit sntp.service could not be found.
Unit sshd-keygen.service could not be found.

○ selinux-autorelabel-mark.service - Mark the need to relabel after reboot
     Loaded: loaded (/usr/lib/systemd/system/selinux-autorelabel-mark.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-11-29 06:29:43 UTC; 1h 47min ago

Nov 29 06:29:43 localhost systemd[1]: Mark the need to relabel after reboot was skipped because of an unmet condition check (ConditionSecurity=!selinux).

● serial-getty@ttyS0.service - Serial Getty on ttyS0
     Loaded: loaded (/usr/lib/systemd/system/serial-getty@.service; enabled-runtime; preset: disabled)
     Active: active (running) since Sat 2025-11-29 06:29:53 UTC; 1h 47min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1014 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48573)
     Memory: 236.0K (peak: 480.0K)
        CPU: 7ms
     CGroup: /system.slice/system-serial\x2dgetty.slice/serial-getty@ttyS0.service
             └─1014 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

Nov 29 06:29:53 np0005539576.novalocal systemd[1]: Started Serial Getty on ttyS0.

○ sshd-keygen@ecdsa.service - OpenSSH ecdsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-11-29 07:30:06 UTC; 47min ago

Nov 29 06:29:44 localhost systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Nov 29 07:30:06 compute-0 systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@ed25519.service - OpenSSH ed25519 Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-11-29 07:30:06 UTC; 47min ago

Nov 29 06:29:44 localhost systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Nov 29 07:30:06 compute-0 systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@rsa.service - OpenSSH rsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-11-29 07:30:06 UTC; 47min ago

Nov 29 06:29:44 localhost systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Nov 29 07:30:06 compute-0 systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

● sshd.service - OpenSSH server daemon
     Loaded: loaded (/usr/lib/systemd/system/sshd.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-11-29 07:30:06 UTC; 47min ago
       Docs: man:sshd(8)
             man:sshd_config(5)
   Main PID: 191978 (sshd)
         IO: 16.0K read, 460.0K written
      Tasks: 1 (limit: 48573)
Unit syslog.service could not be found.
     Memory: 32.3M (peak: 37.8M)
        CPU: 25.440s
     CGroup: /system.slice/sshd.service
             └─191978 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"

Nov 29 08:08:02 compute-0 sshd-session[298983]: Connection closed by authenticating user root 143.14.121.41 port 52880 [preauth]
Nov 29 08:08:12 compute-0 sshd-session[298997]: Connection closed by authenticating user root 143.14.121.41 port 51662 [preauth]
Nov 29 08:08:18 compute-0 sshd-session[299220]: Connection closed by authenticating user root 143.14.121.41 port 55542 [preauth]
Nov 29 08:08:53 compute-0 sshd-session[299222]: Connection closed by authenticating user root 143.14.121.41 port 55554 [preauth]
Nov 29 08:09:16 compute-0 sshd-session[299290]: ssh_dispatch_run_fatal: Connection from 143.14.121.41 port 45038: Connection timed out [preauth]
Nov 29 08:13:52 compute-0 sshd-session[306926]: Connection closed by 80.9.196.204 port 57774
Nov 29 08:13:52 compute-0 sshd-session[306927]: Invalid user a from 80.9.196.204 port 57790
Nov 29 08:13:52 compute-0 sshd-session[306927]: Connection closed by invalid user a 80.9.196.204 port 57790 [preauth]
Nov 29 08:16:26 compute-0 sshd-session[311291]: Accepted publickey for zuul from 192.168.122.10 port 54518 ssh2: ECDSA SHA256:yjwSeYo61Cp1bcL7y5AlYLjzNZeAFiW5isMWg/hA4OQ
Nov 29 08:16:26 compute-0 sshd-session[311291]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)

○ sssd-kcm.service - SSSD Kerberos Cache Manager
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.service; indirect; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ● sssd-kcm.socket
       Docs: man:sssd-kcm(5)

○ sssd.service - System Security Services Daemon
     Loaded: loaded (/usr/lib/systemd/system/sssd.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-11-29 06:29:44 UTC; 1h 47min ago

Nov 29 06:29:44 localhost systemd[1]: System Security Services Daemon was skipped because no trigger condition checks were met.

○ sysstat-collect.service - system activity accounting tool
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-collect.timer
       Docs: man:sa1(8)

○ sysstat-summary.service - Generate a daily summary of process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-summary.timer
       Docs: man:sa2(8)

○ sysstat.service - Resets System Activity Logs
     Loaded: loaded (/usr/lib/systemd/system/sysstat.service; enabled; preset: enabled)
     Active: inactive (dead)

○ systemd-ask-password-console.service - Dispatch Password Requests to Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-console.path
       Docs: man:systemd-ask-password-console.service(8)

○ systemd-ask-password-wall.service - Forward Password Requests to Wall
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-wall.path
       Docs: man:systemd-ask-password-wall.service(8)

○ systemd-binfmt.service - Set Up Additional Binary Formats
     Loaded: loaded (/usr/lib/systemd/system/systemd-binfmt.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
       Docs: man:systemd-binfmt.service(8)
             man:binfmt.d(5)
             https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Nov 29 06:29:43 localhost systemd[1]: Set Up Additional Binary Formats was skipped because no trigger condition checks were met.

○ systemd-boot-random-seed.service - Update Boot Loader Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-random-seed.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
       Docs: man:systemd-boot-random-seed.service(8)
             man:random(4)

Nov 29 06:29:43 localhost systemd[1]: Update Boot Loader Random Seed was skipped because no trigger condition checks were met.

● systemd-boot-update.service - Automatic Boot Loader Update
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-update.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
       Docs: man:bootctl(1)
   Main PID: 698 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Nov 29 06:29:43 localhost systemd[1]: Starting Automatic Boot Loader Update...
Nov 29 06:29:43 localhost bootctl[698]: Couldn't find EFI system partition, skipping.
Nov 29 06:29:43 localhost systemd[1]: Finished Automatic Boot Loader Update.

○ systemd-firstboot.service - First Boot Wizard
     Loaded: loaded (/usr/lib/systemd/system/systemd-firstboot.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
       Docs: man:systemd-firstboot(1)

Nov 29 06:29:42 localhost systemd[1]: First Boot Wizard was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ systemd-fsck-root.service - File System Check on Root Device
     Loaded: loaded (/usr/lib/systemd/system/systemd-fsck-root.service; static)
     Active: inactive (dead) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
   Duration: 1min 33.441s
       Docs: man:systemd-fsck-root.service(8)
   Main PID: 554 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Nov 29 06:28:08 localhost systemd[1]: Starting File System Check on /dev/disk/by-uuid/b277050f-8ace-464d-abb6-4c46d4c45253...
Nov 29 06:28:08 localhost systemd-fsck[556]: /usr/sbin/fsck.xfs: XFS file system.
Nov 29 06:28:08 localhost systemd[1]: Finished File System Check on /dev/disk/by-uuid/b277050f-8ace-464d-abb6-4c46d4c45253.

● systemd-hostnamed.service - Hostname Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-hostnamed.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-hostnamed.service.d
             └─disable-privatedevices.conf
     Active: active (running) since Sat 2025-11-29 08:16:58 UTC; 18s ago
       Docs: man:systemd-hostnamed.service(8)
             man:hostname(5)
             man:machine-info(5)
             man:org.freedesktop.resolve1(5)
   Main PID: 314855 (systemd-hostnam)
         IO: 24.0K read, 0B written
      Tasks: 1 (limit: 48573)
     Memory: 2.7M (peak: 3.8M)
        CPU: 125ms
     CGroup: /system.slice/systemd-hostnamed.service
             └─314855 /usr/lib/systemd/systemd-hostnamed

Nov 29 08:16:58 compute-0 systemd[1]: Starting Hostname Service...
Nov 29 08:16:58 compute-0 systemd[1]: Started Hostname Service.

● systemd-hwdb-update.service - Rebuild Hardware Database
     Loaded: loaded (/usr/lib/systemd/system/systemd-hwdb-update.service; static)
     Active: active (exited) since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
       Docs: man:hwdb(7)
             man:systemd-hwdb(8)
   Main PID: 689 (code=exited, status=0/SUCCESS)
        CPU: 595ms

Nov 29 06:29:42 localhost systemd[1]: Starting Rebuild Hardware Database...
Nov 29 06:29:43 localhost systemd[1]: Finished Rebuild Hardware Database.

○ systemd-initctl.service - initctl Compatibility Daemon
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-initctl.socket
       Docs: man:systemd-initctl.service(8)

● systemd-journal-catalog-update.service - Rebuild Journal Catalog
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-catalog-update.service; static)
     Active: active (exited) since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 703 (code=exited, status=0/SUCCESS)
        CPU: 22ms

Nov 29 06:29:43 localhost systemd[1]: Starting Rebuild Journal Catalog...
Nov 29 06:29:43 localhost systemd[1]: Finished Rebuild Journal Catalog.

● systemd-journal-flush.service - Flush Journal to Persistent Storage
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-flush.service; static)
     Active: active (exited) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 690 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Nov 29 06:29:42 localhost systemd[1]: Starting Flush Journal to Persistent Storage...
Nov 29 06:29:42 localhost systemd[1]: Finished Flush Journal to Persistent Storage.

● systemd-journald.service - Journal Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.service; static)
     Active: active (running) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
TriggeredBy: ● systemd-journald-dev-log.socket
             ● systemd-journald.socket
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 681 (systemd-journal)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48573)
     Memory: 95.0M (peak: 103.1M)
        CPU: 18.904s
     CGroup: /system.slice/systemd-journald.service
             └─681 /usr/lib/systemd/systemd-journald

Nov 29 06:29:42 localhost systemd-journald[681]: Journal started
Nov 29 06:29:42 localhost systemd-journald[681]: Runtime Journal (/run/log/journal/1f988c78c563e12389ab342aced42dbb) is 8.0M, max 153.6M, 145.6M free.
Nov 29 06:29:42 localhost systemd[1]: systemd-journald.service: Deactivated successfully.
Nov 29 06:29:42 localhost systemd-journald[681]: Runtime Journal (/run/log/journal/1f988c78c563e12389ab342aced42dbb) is 8.0M, max 153.6M, 145.6M free.
Nov 29 06:29:42 localhost systemd-journald[681]: Received client request to flush runtime journal.

● systemd-logind.service - User Login Management
     Loaded: loaded (/usr/lib/systemd/system/systemd-logind.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-logind.service.d
             └─10-grub2-logind-service.conf
     Active: active (running) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
       Docs: man:sd-login(3)
             man:systemd-logind.service(8)
             man:logind.conf(5)
             man:org.freedesktop.login1(5)
   Main PID: 807 (systemd-logind)
     Status: "Processing requests..."
         IO: 4.0K read, 0B written
      Tasks: 1 (limit: 48573)
     Memory: 6.3M (peak: 8.1M)
        CPU: 4.288s
     CGroup: /system.slice/systemd-logind.service
             └─807 /usr/lib/systemd/systemd-logind

Nov 29 07:32:45 compute-0 systemd-logind[807]: Removed session 48.
Nov 29 07:33:00 compute-0 systemd-logind[807]: New session 49 of user zuul.
Nov 29 07:36:07 compute-0 systemd-logind[807]: Watching system buttons on /dev/input/event0 (Power Button)
Nov 29 07:36:07 compute-0 systemd-logind[807]: Watching system buttons on /dev/input/event1 (AT Translated Set 2 keyboard)
Nov 29 07:37:11 compute-0 systemd-logind[807]: New session 50 of user zuul.
Nov 29 07:37:11 compute-0 systemd-logind[807]: Session 50 logged out. Waiting for processes to exit.
Nov 29 07:37:11 compute-0 systemd-logind[807]: Removed session 50.
Nov 29 07:38:40 compute-0 systemd-logind[807]: Session 49 logged out. Waiting for processes to exit.
Nov 29 07:38:40 compute-0 systemd-logind[807]: Removed session 49.
Nov 29 08:16:26 compute-0 systemd-logind[807]: New session 51 of user zuul.

○ systemd-machine-id-commit.service - Commit a transient machine-id on disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-machine-id-commit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
       Docs: man:systemd-machine-id-commit.service(8)

Nov 29 06:29:43 localhost systemd[1]: Commit a transient machine-id on disk was skipped because of an unmet condition check (ConditionPathIsMountPoint=/etc/machine-id).

● systemd-machined.service - Virtual Machine and Container Registration Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-machined.service; static)
     Active: active (running) since Sat 2025-11-29 07:32:01 UTC; 45min ago
Unit systemd-networkd-wait-online.service could not be found.
       Docs: man:systemd-machined.service(8)
             man:org.freedesktop.machine1(5)
   Main PID: 217781 (systemd-machine)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48573)
     Memory: 1.5M (peak: 2.0M)
        CPU: 2.114s
     CGroup: /system.slice/systemd-machined.service
             └─217781 /usr/lib/systemd/systemd-machined

Nov 29 08:07:24 compute-0 systemd-machined[217781]: Machine qemu-23-instance-00000017 terminated.
Nov 29 08:08:02 compute-0 systemd-machined[217781]: Machine qemu-25-instance-00000019 terminated.
Nov 29 08:09:00 compute-0 systemd-machined[217781]: New machine qemu-27-instance-0000001b.
Nov 29 08:09:44 compute-0 systemd-machined[217781]: Machine qemu-27-instance-0000001b terminated.
Nov 29 08:09:45 compute-0 systemd-machined[217781]: New machine qemu-28-instance-0000001c.
Nov 29 08:10:22 compute-0 systemd-machined[217781]: Machine qemu-28-instance-0000001c terminated.
Nov 29 08:10:29 compute-0 systemd-machined[217781]: New machine qemu-29-instance-0000001d.
Nov 29 08:10:52 compute-0 systemd-machined[217781]: Machine qemu-29-instance-0000001d terminated.
Nov 29 08:11:14 compute-0 systemd-machined[217781]: New machine qemu-30-instance-0000001e.
Nov 29 08:11:54 compute-0 systemd-machined[217781]: Machine qemu-30-instance-0000001e terminated.

● systemd-modules-load.service - Load Kernel Modules
     Loaded: loaded (/usr/lib/systemd/system/systemd-modules-load.service; static)
     Active: active (exited) since Sat 2025-11-29 07:36:03 UTC; 41min ago
       Docs: man:systemd-modules-load.service(8)
             man:modules-load.d(5)
   Main PID: 239133 (code=exited, status=0/SUCCESS)
        CPU: 11ms

Nov 29 07:36:03 compute-0 systemd[1]: Starting Load Kernel Modules...
Nov 29 07:36:03 compute-0 systemd[1]: Finished Load Kernel Modules.

● systemd-network-generator.service - Generate network units from Kernel command line
     Loaded: loaded (/usr/lib/systemd/system/systemd-network-generator.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
       Docs: man:systemd-network-generator.service(8)
   Main PID: 682 (code=exited, status=0/SUCCESS)
        CPU: 16ms

Nov 29 06:29:42 localhost systemd[1]: Finished Generate network units from Kernel command line.

○ systemd-pcrmachine.service - TPM2 PCR Machine ID Measurement
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrmachine.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
       Docs: man:systemd-pcrmachine.service(8)

○ systemd-pcrphase-initrd.service - TPM2 PCR Barrier (initrd)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-initrd.service; static)
     Active: inactive (dead)
       Docs: man:systemd-pcrphase-initrd.service(8)

○ systemd-pcrphase-sysinit.service - TPM2 PCR Barrier (Initialization)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-sysinit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
       Docs: man:systemd-pcrphase-sysinit.service(8)

Nov 29 06:29:44 localhost systemd[1]: TPM2 PCR Barrier (Initialization) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pcrphase.service - TPM2 PCR Barrier (User)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-11-29 06:29:45 UTC; 1h 47min ago
       Docs: man:systemd-pcrphase.service(8)

Nov 29 06:29:45 np0005539576.novalocal systemd[1]: TPM2 PCR Barrier (User) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pstore.service - Platform Persistent Storage Archival
     Loaded: loaded (/usr/lib/systemd/system/systemd-pstore.service; enabled; preset: enabled)
Unit systemd-timesyncd.service could not be found.
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
       Docs: man:systemd-pstore(8)

Nov 29 06:29:42 localhost systemd[1]: Platform Persistent Storage Archival was skipped because of an unmet condition check (ConditionDirectoryNotEmpty=/sys/fs/pstore).

● systemd-random-seed.service - Load/Save OS Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-random-seed.service; static)
     Active: active (exited) since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
       Docs: man:systemd-random-seed.service(8)
             man:random(4)
   Main PID: 691 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Nov 29 06:29:42 localhost systemd[1]: Starting Load/Save OS Random Seed...
Nov 29 06:29:43 localhost systemd[1]: Finished Load/Save OS Random Seed.

● systemd-remount-fs.service - Remount Root and Kernel File Systems
     Loaded: loaded (/usr/lib/systemd/system/systemd-remount-fs.service; enabled-runtime; preset: disabled)
     Active: active (exited) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
       Docs: man:systemd-remount-fs.service(8)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
   Main PID: 683 (code=exited, status=0/SUCCESS)
        CPU: 21ms

Nov 29 06:29:42 localhost systemd[1]: Finished Remount Root and Kernel File Systems.

○ systemd-repart.service - Repartition Root Disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-repart.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
       Docs: man:systemd-repart.service(8)

○ systemd-rfkill.service - Load/Save RF Kill Switch Status
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-rfkill.socket
       Docs: man:systemd-rfkill.service(8)

● systemd-sysctl.service - Apply Kernel Variables
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysctl.service; static)
     Active: active (exited) since Sat 2025-11-29 07:05:20 UTC; 1h 11min ago
       Docs: man:systemd-sysctl.service(8)
             man:sysctl.d(5)
   Main PID: 44936 (code=exited, status=0/SUCCESS)
        CPU: 31ms

Nov 29 07:05:20 compute-0 systemd[1]: Starting Apply Kernel Variables...
Nov 29 07:05:20 compute-0 systemd[1]: Finished Apply Kernel Variables.

○ systemd-sysext.service - Merge System Extension Images into /usr/ and /opt/
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysext.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd-sysext.service(8)

● systemd-sysusers.service - Create System Users
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysusers.service; static)
     Active: active (exited) since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
       Docs: man:sysusers.d(5)
             man:systemd-sysusers.service(8)
   Main PID: 692 (code=exited, status=0/SUCCESS)
        CPU: 24ms

Nov 29 06:29:42 localhost systemd[1]: Starting Create System Users...
Nov 29 06:29:43 localhost systemd[1]: Finished Create System Users.

○ systemd-tmpfiles-clean.service - Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.service; static)
     Active: inactive (dead) since Sat 2025-11-29 06:43:23 UTC; 1h 33min ago
TriggeredBy: ● systemd-tmpfiles-clean.timer
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 29912 (code=exited, status=0/SUCCESS)
        CPU: 43ms

Nov 29 06:43:23 compute-0 systemd[1]: Starting Cleanup of Temporary Directories...
Nov 29 06:43:23 compute-0 systemd[1]: systemd-tmpfiles-clean.service: Deactivated successfully.
Nov 29 06:43:23 compute-0 systemd[1]: Finished Cleanup of Temporary Directories.

● systemd-tmpfiles-setup-dev.service - Create Static Device Nodes in /dev
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup-dev.service; static)
     Active: active (exited) since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
Unit systemd-tmpfiles.service could not be found.
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 694 (code=exited, status=0/SUCCESS)
        CPU: 39ms

Nov 29 06:29:43 localhost systemd[1]: Starting Create Static Device Nodes in /dev...
Nov 29 06:29:43 localhost systemd[1]: Finished Create Static Device Nodes in /dev.

● systemd-tmpfiles-setup.service - Create Volatile Files and Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup.service; static)
     Active: active (exited) since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 699 (code=exited, status=0/SUCCESS)
        CPU: 110ms

Nov 29 06:29:43 localhost systemd[1]: Starting Create Volatile Files and Directories...
Nov 29 06:29:43 localhost systemd[1]: Finished Create Volatile Files and Directories.

○ systemd-udev-settle.service - Wait for udev To Complete Device Initialization
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-settle.service; static)
     Active: inactive (dead)
       Docs: man:systemd-udev-settle.service(8)

● systemd-udev-trigger.service - Coldplug All udev Devices
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-trigger.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-udev-trigger.service.d
             └─systemd-udev-trigger-no-reload.conf
     Active: active (exited) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
       Docs: man:udev(7)
             man:systemd-udevd.service(8)
   Main PID: 686 (code=exited, status=0/SUCCESS)
        CPU: 110ms

Nov 29 06:29:42 localhost systemd[1]: Finished Coldplug All udev Devices.

● systemd-udevd.service - Rule-based Manager for Device Events and Files
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd.service; static)
     Active: active (running) since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
TriggeredBy: ● systemd-udevd-control.socket
             ● systemd-udevd-kernel.socket
       Docs: man:systemd-udevd.service(8)
             man:udev(7)
   Main PID: 733 (systemd-udevd)
     Status: "Processing with 32 children at max"
         IO: 137.1M read, 48.8M written
      Tasks: 1
     Memory: 56.5M (peak: 94.8M)
        CPU: 12.647s
     CGroup: /system.slice/systemd-udevd.service
             └─udev
               └─733 /usr/lib/systemd/systemd-udevd

Nov 29 08:09:45 compute-0 systemd-udevd[301122]: Network interface NamePolicy= disabled on kernel command line.
Nov 29 08:10:29 compute-0 systemd-udevd[303025]: Network interface NamePolicy= disabled on kernel command line.
Nov 29 08:11:14 compute-0 systemd-udevd[303865]: Network interface NamePolicy= disabled on kernel command line.
Nov 29 08:11:14 compute-0 systemd-udevd[303867]: Network interface NamePolicy= disabled on kernel command line.
Nov 29 08:16:38 compute-0 lvm[311963]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Nov 29 08:16:38 compute-0 lvm[311963]: VG ceph_vg0 finished
Nov 29 08:16:38 compute-0 lvm[311965]: PV /dev/loop5 online, VG ceph_vg2 is complete.
Nov 29 08:16:38 compute-0 lvm[311965]: VG ceph_vg2 finished
Nov 29 08:16:38 compute-0 lvm[311995]: PV /dev/loop4 online, VG ceph_vg1 is complete.
Nov 29 08:16:38 compute-0 lvm[311995]: VG ceph_vg1 finished

● systemd-update-done.service - Update is Completed
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-done.service; static)
     Active: active (exited) since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
       Docs: man:systemd-update-done.service(8)
   Main PID: 734 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Nov 29 06:29:43 localhost systemd[1]: Starting Update is Completed...
Nov 29 06:29:43 localhost systemd[1]: Finished Update is Completed.

○ systemd-update-utmp-runlevel.service - Record Runlevel Change in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp-runlevel.service; static)
     Active: inactive (dead) since Sat 2025-11-29 06:29:53 UTC; 1h 47min ago
       Docs: man:systemd-update-utmp-runlevel.service(8)
             man:utmp(5)
Unit tlp.service could not be found.
   Main PID: 1029 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Nov 29 06:29:53 np0005539576.novalocal systemd[1]: Starting Record Runlevel Change in UTMP...
Nov 29 06:29:53 np0005539576.novalocal systemd[1]: systemd-update-utmp-runlevel.service: Deactivated successfully.
Nov 29 06:29:53 np0005539576.novalocal systemd[1]: Finished Record Runlevel Change in UTMP.

● systemd-update-utmp.service - Record System Boot/Shutdown in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp.service; static)
     Active: active (exited) since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
       Docs: man:systemd-update-utmp.service(8)
             man:utmp(5)
   Main PID: 731 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Nov 29 06:29:43 localhost systemd[1]: Starting Record System Boot/Shutdown in UTMP...
Nov 29 06:29:43 localhost systemd[1]: Finished Record System Boot/Shutdown in UTMP.

● systemd-user-sessions.service - Permit User Sessions
     Loaded: loaded (/usr/lib/systemd/system/systemd-user-sessions.service; static)
     Active: active (exited) since Sat 2025-11-29 06:29:53 UTC; 1h 47min ago
       Docs: man:systemd-user-sessions.service(8)
   Main PID: 1009 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Nov 29 06:29:53 np0005539576.novalocal systemd[1]: Starting Permit User Sessions...
Nov 29 06:29:53 np0005539576.novalocal systemd[1]: Finished Permit User Sessions.

○ systemd-vconsole-setup.service - Setup Virtual Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-vconsole-setup.service; static)
     Active: inactive (dead) since Sat 2025-11-29 06:29:41 UTC; 1h 47min ago
   Duration: 1min 33.959s
       Docs: man:systemd-vconsole-setup.service(8)
             man:vconsole.conf(5)
   Main PID: 311 (code=exited, status=0/SUCCESS)
        CPU: 226ms

Nov 29 06:28:07 localhost systemd[1]: Finished Setup Virtual Console.
Nov 29 06:29:41 localhost systemd[1]: systemd-vconsole-setup.service: Deactivated successfully.
Nov 29 06:29:41 localhost systemd[1]: Stopped Setup Virtual Console.
Notice: journal has been rotated since unit was started, output may be incomplete.

● tuned.service - Dynamic System Tuning Daemon
     Loaded: loaded (/usr/lib/systemd/system/tuned.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-11-29 07:18:31 UTC; 58min ago
       Docs: man:tuned(8)
             man:tuned.conf(5)
             man:tuned-adm(8)
   Main PID: 113121 (tuned)
         IO: 0B read, 0B written
      Tasks: 4 (limit: 48573)
     Memory: 13.9M (peak: 16.0M)
        CPU: 1.583s
     CGroup: /system.slice/tuned.service
             └─113121 /usr/bin/python3 -Es /usr/sbin/tuned -l -P

Nov 29 07:18:31 compute-0 systemd[1]: Starting Dynamic System Tuning Daemon...
Nov 29 07:18:31 compute-0 systemd[1]: Started Dynamic System Tuning Daemon.

○ unbound-anchor.service - update of the root trust anchor for DNSSEC validation in unbound
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.service; static)
     Active: inactive (dead)
TriggeredBy: ● unbound-anchor.timer
       Docs: man:unbound-anchor(8)

● user-runtime-dir@1000.service - User Runtime Directory /run/user/1000
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Sat 2025-11-29 06:30:09 UTC; 1h 47min ago
       Docs: man:user@.service(5)
   Main PID: 4300 (code=exited, status=0/SUCCESS)
        CPU: 26ms

Nov 29 06:30:09 np0005539576.novalocal systemd[1]: Starting User Runtime Directory /run/user/1000...
Nov 29 06:30:09 np0005539576.novalocal systemd[1]: Finished User Runtime Directory /run/user/1000.

● user-runtime-dir@42477.service - User Runtime Directory /run/user/42477
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Sat 2025-11-29 07:12:12 UTC; 1h 5min ago
       Docs: man:user@.service(5)
   Main PID: 76616 (code=exited, status=0/SUCCESS)
        CPU: 28ms

Nov 29 07:12:11 compute-0 systemd[1]: Starting User Runtime Directory /run/user/42477...
Nov 29 07:12:12 compute-0 systemd[1]: Finished User Runtime Directory /run/user/42477.

● user@1000.service - User Manager for UID 1000
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Sat 2025-11-29 06:30:09 UTC; 1h 47min ago
       Docs: man:user@.service(5)
   Main PID: 4301 (systemd)
     Status: "Ready."
         IO: 676.0K read, 8.0K written
      Tasks: 5
     Memory: 9.4M (peak: 15.0M)
        CPU: 5.231s
     CGroup: /user.slice/user-1000.slice/user@1000.service
             ├─app.slice
             │ └─dbus-broker.service
             │   ├─14284 /usr/bin/dbus-broker-launch --scope user
             │   └─14294 dbus-broker --log 4 --controller 9 --machine-id 1f988c78c563e12389ab342aced42dbb --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             ├─init.scope
             │ ├─4301 /usr/lib/systemd/systemd --user
             │ └─4303 "(sd-pam)"
             └─user.slice
               └─podman-pause-aee1f4af.scope
                 └─14221 catatonit -P

Nov 29 06:42:23 np0005539576.novalocal dbus-broker-launch[14284]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +31: Eavesdropping is deprecated and ignored
Nov 29 06:42:23 np0005539576.novalocal dbus-broker-launch[14284]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +33: Eavesdropping is deprecated and ignored
Nov 29 06:42:23 np0005539576.novalocal systemd[4301]: Started D-Bus User Message Bus.
Nov 29 06:42:23 np0005539576.novalocal dbus-broker-lau[14284]: Ready
Nov 29 06:42:23 np0005539576.novalocal systemd[4301]: selinux: avc:  op=load_policy lsm=selinux seqno=6 res=1
Nov 29 06:42:23 np0005539576.novalocal systemd[4301]: Created slice Slice /user.
Nov 29 06:42:23 np0005539576.novalocal systemd[4301]: podman-14211.scope: unit configures an IP firewall, but not running as root.
Nov 29 06:42:23 np0005539576.novalocal systemd[4301]: (This warning is only shown for the first unit using IP firewalling.)
Nov 29 06:42:23 np0005539576.novalocal systemd[4301]: Started podman-14211.scope.
Nov 29 06:42:23 np0005539576.novalocal systemd[4301]: Started podman-pause-aee1f4af.scope.

● user@42477.service - User Manager for UID 42477
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Sat 2025-11-29 07:12:12 UTC; 1h 5min ago
       Docs: man:user@.service(5)
   Main PID: 76617 (systemd)
     Status: "Ready."
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 8.9M (peak: 10.7M)
        CPU: 3.920s
     CGroup: /user.slice/user-42477.slice/user@42477.service
             └─init.scope
               ├─76617 /usr/lib/systemd/systemd --user
               └─76619 "(sd-pam)"

Nov 29 07:12:12 compute-0 systemd[76617]: Finished Create User's Volatile Files and Directories.
Nov 29 07:12:12 compute-0 systemd[76617]: Reached target Basic System.
Nov 29 07:12:12 compute-0 systemd[76617]: Reached target Main User Target.
Nov 29 07:12:12 compute-0 systemd[76617]: Startup finished in 169ms.
Nov 29 07:12:12 compute-0 systemd[1]: Started User Manager for UID 42477.
Nov 29 07:14:12 compute-0 systemd[76617]: Starting Mark boot as successful...
Nov 29 07:14:12 compute-0 systemd[76617]: Finished Mark boot as successful.
Nov 29 07:17:15 compute-0 systemd[76617]: Created slice User Background Tasks Slice.
Nov 29 07:17:15 compute-0 systemd[76617]: Starting Cleanup of User's Temporary Files and Directories...
Nov 29 07:17:15 compute-0 systemd[76617]: Finished Cleanup of User's Temporary Files and Directories.

○ virtinterfaced.service - libvirt interface daemon
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtinterfaced-ro.socket
             ○ virtinterfaced-admin.socket
             ○ virtinterfaced.socket
       Docs: man:virtinterfaced(8)
             https://libvirt.org/

○ virtlockd.service - libvirt locking daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ● virtlockd.socket
             ○ virtlockd-admin.socket
       Docs: man:virtlockd(8)
             https://libvirt.org/

● virtlogd.service - libvirt logging daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-11-29 07:31:57 UTC; 45min ago
TriggeredBy: ● virtlogd-admin.socket
             ● virtlogd.socket
       Docs: man:virtlogd(8)
             https://libvirt.org/
   Main PID: 217150 (virtlogd)
         IO: 956.0K read, 2.6M written
      Tasks: 1 (limit: 48573)
     Memory: 3.9M (peak: 4.6M)
        CPU: 45.755s
     CGroup: /system.slice/virtlogd.service
             └─217150 /usr/sbin/virtlogd

Nov 29 07:31:57 compute-0 systemd[1]: Starting libvirt logging daemon...
Nov 29 07:31:57 compute-0 systemd[1]: Started libvirt logging daemon.

○ virtnetworkd.service - libvirt network daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnetworkd-ro.socket
             ○ virtnetworkd-admin.socket
             ○ virtnetworkd.socket
       Docs: man:virtnetworkd(8)
             https://libvirt.org/

● virtnodedevd.service - libvirt nodedev daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-11-29 07:38:24 UTC; 38min ago
TriggeredBy: ● virtnodedevd-ro.socket
             ● virtnodedevd-admin.socket
             ● virtnodedevd.socket
       Docs: man:virtnodedevd(8)
             https://libvirt.org/
   Main PID: 256467 (virtnodedevd)
         IO: 4.4M read, 0B written
      Tasks: 20 (limit: 48573)
     Memory: 10.1M (peak: 11.7M)
        CPU: 4.199s
     CGroup: /system.slice/virtnodedevd.service
             └─256467 /usr/sbin/virtnodedevd --timeout 120

Nov 29 07:38:24 compute-0 systemd[1]: Starting libvirt nodedev daemon...
Nov 29 07:38:24 compute-0 systemd[1]: Started libvirt nodedev daemon.

○ virtnwfilterd.service - libvirt nwfilter daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnwfilterd-admin.socket
             ○ virtnwfilterd-ro.socket
             ○ virtnwfilterd.socket
       Docs: man:virtnwfilterd(8)
             https://libvirt.org/

○ virtproxyd.service - libvirt proxy daemon
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.service; enabled; preset: disabled)
     Active: inactive (dead) since Sat 2025-11-29 07:34:00 UTC; 43min ago
   Duration: 2min 5ms
TriggeredBy: ● virtproxyd.socket
             ● virtproxyd-ro.socket
             ● virtproxyd-tls.socket
             ● virtproxyd-admin.socket
       Docs: man:virtproxyd(8)
             https://libvirt.org/
   Main PID: 217570 (code=exited, status=0/SUCCESS)
        CPU: 70ms

Nov 29 07:32:00 compute-0 systemd[1]: Starting libvirt proxy daemon...
Nov 29 07:32:00 compute-0 systemd[1]: Started libvirt proxy daemon.
Nov 29 07:34:00 compute-0 systemd[1]: virtproxyd.service: Deactivated successfully.

● virtqemud.service - libvirt QEMU daemon
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-11-29 07:38:22 UTC; 38min ago
TriggeredBy: ● virtqemud-ro.socket
             ● virtqemud.socket
             ● virtqemud-admin.socket
       Docs: man:virtqemud(8)
             https://libvirt.org/
   Main PID: 256259 (virtqemud)
         IO: 45.9M read, 1.3M written
      Tasks: 19 (limit: 32768)
     Memory: 68.3M (peak: 86.8M)
        CPU: 10.877s
     CGroup: /system.slice/virtqemud.service
             └─256259 /usr/sbin/virtqemud --timeout 120

Unit ypbind.service could not be found.
Unit yppasswdd.service could not be found.
Unit ypserv.service could not be found.
Unit ypxfrd.service could not be found.
Nov 29 07:38:22 compute-0 systemd[1]: Starting libvirt QEMU daemon...
Nov 29 07:38:22 compute-0 systemd[1]: Started libvirt QEMU daemon.
Nov 29 07:38:31 compute-0 virtqemud[256259]: libvirt version: 11.9.0, package: 1.el9 (builder@centos.org, 2025-11-04-09:54:50, )
Nov 29 07:38:31 compute-0 virtqemud[256259]: hostname: compute-0
Nov 29 07:38:31 compute-0 virtqemud[256259]: End of file while reading data: Input/output error
Nov 29 08:03:00 compute-0 virtqemud[256259]: End of file while reading data: Input/output error
Nov 29 08:16:37 compute-0 virtqemud[256259]: Failed to connect socket to '/var/run/libvirt/virtnetworkd-sock-ro': No such file or directory
Nov 29 08:16:37 compute-0 virtqemud[256259]: Failed to connect socket to '/var/run/libvirt/virtnwfilterd-sock-ro': No such file or directory
Nov 29 08:16:37 compute-0 virtqemud[256259]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory
Nov 29 08:17:16 compute-0 virtqemud[256259]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory

● virtsecretd.service - libvirt secret daemon
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-11-29 07:46:59 UTC; 30min ago
TriggeredBy: ● virtsecretd-ro.socket
             ● virtsecretd-admin.socket
             ● virtsecretd.socket
       Docs: man:virtsecretd(8)
             https://libvirt.org/
   Main PID: 266034 (virtsecretd)
         IO: 804.0K read, 170.5K written
      Tasks: 18 (limit: 48573)
     Memory: 4.8M (peak: 5.7M)
        CPU: 555ms
     CGroup: /system.slice/virtsecretd.service
             └─266034 /usr/sbin/virtsecretd --timeout 120

Nov 29 07:46:59 compute-0 systemd[1]: Starting libvirt secret daemon...
Nov 29 07:46:59 compute-0 systemd[1]: Started libvirt secret daemon.

○ virtstoraged.service - libvirt storage daemon
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtstoraged-admin.socket
             ○ virtstoraged-ro.socket
             ○ virtstoraged.socket
       Docs: man:virtstoraged(8)
             https://libvirt.org/

● -.slice - Root Slice
     Loaded: loaded
     Active: active since Sat 2025-11-29 06:28:07 UTC; 1h 49min ago
      Until: Sat 2025-11-29 06:28:07 UTC; 1h 49min ago
       Docs: man:systemd.special(7)
      Tasks: 1412
     Memory: 3.5G
        CPU: 59min 47.750s
     CGroup: /
             ├─318245 turbostat --debug sleep 10
             ├─318248 sleep 10
             ├─init.scope
             │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
             ├─machine.slice
             │ ├─libpod-23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8.scope
             │ │ └─container
             │ │   ├─153385 dumb-init --single-child -- kolla_start
             │ │   └─153388 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
             │ ├─libpod-53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f.scope
             │ │ └─container
             │ │   ├─238043 dumb-init --single-child -- kolla_start
             │ │   └─238046 /usr/sbin/multipathd -d
             │ ├─libpod-8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e.scope
             │ │ └─container
             │ │   ├─163635 dumb-init --single-child -- kolla_start
             │ │   ├─163655 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │ │   ├─164115 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │ │   ├─164178 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpthalsdm8/privsep.sock
             │ │   ├─266092 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpv5pj6f28/privsep.sock
             │ │   └─266358 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmp_ylguf3k/privsep.sock
             │ └─libpod-c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809.scope
             │   └─container
             │     ├─256731 dumb-init --single-child -- kolla_start
             │     ├─256736 /usr/bin/python3 /usr/bin/nova-compute
             │     ├─265746 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmp4yqe7f_k/privsep.sock
             │     ├─266745 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmp2b17b584/privsep.sock
             │     └─266829 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmparx_pv8r/privsep.sock
             ├─system.slice
             │ ├─NetworkManager.service
             │ │ └─48962 /usr/sbin/NetworkManager --no-daemon
             │ ├─auditd.service
             │ │ ├─704 /sbin/auditd
             │ │ └─706 /usr/sbin/sedispatch
             │ ├─chronyd.service
             │ │ └─58530 /usr/sbin/chronyd -F 2
             │ ├─crond.service
             │ │ └─1011 /usr/sbin/crond -n
             │ ├─dbus-broker.service
             │ │ ├─774 /usr/bin/dbus-broker-launch --scope system --audit
             │ │ └─781 dbus-broker --log 4 --controller 9 --machine-id 1f988c78c563e12389ab342aced42dbb --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             │ ├─edpm_multipathd.service
             │ │ └─238041 /usr/bin/conmon --api-version 1 -c 53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f -u 53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f/userdata -p /run/containers/storage/overlay-containers/53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f/userdata/pidfile -n multipathd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f/userdata/oci-log --conmon-pidfile /run/multipathd.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f
             │ ├─edpm_nova_compute.service
             │ │ └─256729 /usr/bin/conmon --api-version 1 -c c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809 -u c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809/userdata -p /run/containers/storage/overlay-containers/c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809
             │ ├─edpm_ovn_controller.service
             │ │ └─153383 /usr/bin/conmon --api-version 1 -c 23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8 -u 23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8/userdata -p /run/containers/storage/overlay-containers/23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8
             │ ├─edpm_ovn_metadata_agent.service
             │ │ └─163632 /usr/bin/conmon --api-version 1 -c 8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e -u 8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e/userdata -p /run/containers/storage/overlay-containers/8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e
             │ ├─gssproxy.service
             │ │ └─875 /usr/sbin/gssproxy -D
             │ ├─irqbalance.service
             │ │ └─804 /usr/sbin/irqbalance
             │ ├─iscsid.service
             │ │ └─240752 /usr/sbin/iscsid -f
             │ ├─ovs-vswitchd.service
             │ │ └─47265 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             │ ├─ovsdb-server.service
             │ │ └─47183 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             │ ├─polkit.service
             │ │ └─43449 /usr/lib/polkit-1/polkitd --no-debug
             │ ├─rpcbind.service
             │ │ └─702 /usr/bin/rpcbind -w -f
             │ ├─rsyslog.service
             │ │ └─1007 /usr/sbin/rsyslogd -n
             │ ├─sshd.service
             │ │ └─191978 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             │ ├─system-ceph\x2d14ff1f30\x2d5059\x2d58f1\x2d9a23\x2d69871bb275a1.slice
             │ │ ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@crash.compute-0.service
             │ │ │ ├─libpod-payload-7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230
             │ │ │ │ ├─82993 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ │ └─82995 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ └─runtime
             │ │ │   └─82991 /usr/bin/conmon --api-version 1 -c 7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230 -u 7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230/userdata -p /run/containers/storage/overlay-containers/7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230
             │ │ ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mds.cephfs.compute-0.bdhrqf.service
             │ │ │ ├─libpod-payload-f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511
             │ │ │ │ ├─102314 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.bdhrqf -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─102316 /usr/bin/ceph-mds -n mds.cephfs.compute-0.bdhrqf -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─102312 /usr/bin/conmon --api-version 1 -c f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511 -u f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511/userdata -p /run/containers/storage/overlay-containers/f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-mds-cephfs-compute-0-bdhrqf --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mds.cephfs.compute-0.bdhrqf.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511
             │ │ ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mgr.compute-0.kzdpag.service
             │ │ │ ├─libpod-payload-cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea
             │ │ │ │ ├─75343 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.kzdpag -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─75345 /usr/bin/ceph-mgr -n mgr.compute-0.kzdpag -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─75341 /usr/bin/conmon --api-version 1 -c cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea -u cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea/userdata -p /run/containers/storage/overlay-containers/cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-mgr-compute-0-kzdpag --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mgr.compute-0.kzdpag.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea
             │ │ ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mon.compute-0.service
             │ │ │ ├─libpod-payload-21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90
             │ │ │ │ ├─75048 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ │ └─75050 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─75046 /usr/bin/conmon --api-version 1 -c 21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90 -u 21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90/userdata -p /run/containers/storage/overlay-containers/21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90
             │ │ ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.0.service
             │ │ │ ├─libpod-payload-9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96
             │ │ │ │ ├─88829 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─88831 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─88827 /usr/bin/conmon --api-version 1 -c 9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96 -u 9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96/userdata -p /run/containers/storage/overlay-containers/9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96
             │ │ ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.1.service
             │ │ │ ├─libpod-payload-6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11
             │ │ │ │ ├─89838 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─89840 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─89836 /usr/bin/conmon --api-version 1 -c 6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11 -u 6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11/userdata -p /run/containers/storage/overlay-containers/6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11
             │ │ ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.2.service
             │ │ │ ├─libpod-payload-2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23
             │ │ │ │ ├─91078 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─91083 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─91076 /usr/bin/conmon --api-version 1 -c 2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23 -u 2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23/userdata -p /run/containers/storage/overlay-containers/2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23
             │ │ └─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@rgw.rgw.compute-0.qxekyl.service
             │ │   ├─libpod-payload-4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf
             │ │   │ ├─99621 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.qxekyl -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │   │ └─99623 /usr/bin/radosgw -n client.rgw.rgw.compute-0.qxekyl -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │   └─runtime
             │ │     └─99619 /usr/bin/conmon --api-version 1 -c 4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf -u 4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf/userdata -p /run/containers/storage/overlay-containers/4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-rgw-rgw-compute-0-qxekyl --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@rgw.rgw.compute-0.qxekyl.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf
             │ ├─system-getty.slice
             │ │ └─getty@tty1.service
             │ │   └─1013 /sbin/agetty -o "-p -- \\u" --noclear - linux
             │ ├─system-serial\x2dgetty.slice
             │ │ └─serial-getty@ttyS0.service
             │ │   └─1014 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             │ ├─systemd-hostnamed.service
             │ │ └─314855 /usr/lib/systemd/systemd-hostnamed
             │ ├─systemd-journald.service
             │ │ └─681 /usr/lib/systemd/systemd-journald
             │ ├─systemd-logind.service
             │ │ └─807 /usr/lib/systemd/systemd-logind
             │ ├─systemd-machined.service
             │ │ └─217781 /usr/lib/systemd/systemd-machined
             │ ├─systemd-udevd.service
             │ │ └─udev
             │ │   └─733 /usr/lib/systemd/systemd-udevd
             │ ├─tuned.service
             │ │ └─113121 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             │ ├─virtlogd.service
             │ │ └─217150 /usr/sbin/virtlogd
             │ ├─virtnodedevd.service
             │ │ └─256467 /usr/sbin/virtnodedevd --timeout 120
             │ ├─virtqemud.service
             │ │ └─256259 /usr/sbin/virtqemud --timeout 120
             │ └─virtsecretd.service
             │   └─266034 /usr/sbin/virtsecretd --timeout 120
             └─user.slice
               ├─user-1000.slice
               │ ├─session-1.scope
               │ │ └─4513 /usr/bin/python3
               │ ├─session-51.scope
               │ │ ├─311291 "sshd-session: zuul [priv]"
               │ │ ├─311294 "sshd-session: zuul@notty"
               │ │ ├─311295 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
               │ │ ├─311319 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
               │ │ ├─318244 timeout 15s turbostat --debug sleep 10
               │ │ ├─318920 timeout 300s systemctl status --all
               │ │ ├─318922 systemctl status --all
               │ │ ├─318998 timeout --foreground 300s virsh -r nodedev-dumpxml scsi_0_0_0_0
               │ │ └─318999 virsh -r nodedev-dumpxml scsi_0_0_0_0
               │ └─user@1000.service
               │   ├─app.slice
               │   │ └─dbus-broker.service
               │   │   ├─14284 /usr/bin/dbus-broker-launch --scope user
               │   │   └─14294 dbus-broker --log 4 --controller 9 --machine-id 1f988c78c563e12389ab342aced42dbb --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               │   ├─init.scope
               │   │ ├─4301 /usr/lib/systemd/systemd --user
               │   │ └─4303 "(sd-pam)"
               │   └─user.slice
               │     └─podman-pause-aee1f4af.scope
               │       └─14221 catatonit -P
               └─user-42477.slice
                 ├─session-20.scope
                 │ ├─76613 "sshd-session: ceph-admin [priv]"
                 │ └─76635 "sshd-session: ceph-admin"
                 ├─session-22.scope
                 │ ├─76620 "sshd-session: ceph-admin [priv]"
                 │ └─76636 "sshd-session: ceph-admin@notty"
                 ├─session-23.scope
                 │ ├─76687 "sshd-session: ceph-admin [priv]"
                 │ └─76690 "sshd-session: ceph-admin@notty"
                 ├─session-24.scope
                 │ ├─76741 "sshd-session: ceph-admin [priv]"
                 │ └─76744 "sshd-session: ceph-admin@notty"
                 ├─session-25.scope
                 │ ├─76795 "sshd-session: ceph-admin [priv]"
                 │ └─76798 "sshd-session: ceph-admin@notty"
                 ├─session-26.scope
                 │ ├─76849 "sshd-session: ceph-admin [priv]"
                 │ └─76852 "sshd-session: ceph-admin@notty"
                 ├─session-27.scope
                 │ ├─76903 "sshd-session: ceph-admin [priv]"
                 │ └─76906 "sshd-session: ceph-admin@notty"
                 ├─session-28.scope
                 │ ├─76957 "sshd-session: ceph-admin [priv]"
                 │ └─76960 "sshd-session: ceph-admin@notty"
                 ├─session-29.scope
                 │ ├─77011 "sshd-session: ceph-admin [priv]"
                 │ └─77014 "sshd-session: ceph-admin@notty"
                 ├─session-30.scope
                 │ ├─77065 "sshd-session: ceph-admin [priv]"
                 │ └─77068 "sshd-session: ceph-admin@notty"
                 ├─session-31.scope
                 │ ├─77092 "sshd-session: ceph-admin [priv]"
                 │ └─77095 "sshd-session: ceph-admin@notty"
                 ├─session-32.scope
                 │ ├─77146 "sshd-session: ceph-admin [priv]"
                 │ └─77149 "sshd-session: ceph-admin@notty"
                 └─user@42477.service
                   └─init.scope
                     ├─76617 /usr/lib/systemd/systemd --user
                     └─76619 "(sd-pam)"

Nov 29 08:17:03 compute-0 systemd[1]: Started libcrun container.
Nov 29 08:17:03 compute-0 systemd[1]: libpod-461e59920c5310cbd4998dedb59ec0837a1a689a1fea0d784d33ff5a9f7b8ecf.scope: Deactivated successfully.
Nov 29 08:17:03 compute-0 systemd[1]: var-lib-containers-storage-overlay-a6117d844ad7ad7a2fc33616a7a58ef68dd010ec5e13672e6ef63b2ba4ed2080-merged.mount: Deactivated successfully.
Nov 29 08:17:03 compute-0 systemd[1]: libpod-conmon-461e59920c5310cbd4998dedb59ec0837a1a689a1fea0d784d33ff5a9f7b8ecf.scope: Deactivated successfully.
Nov 29 08:17:03 compute-0 systemd[1]: Started libpod-conmon-d4a9be8cb43979961b1e87e7f3e0d6905eb8c9a0bb29d35e2dfe03c2ac9375f6.scope.
Nov 29 08:17:03 compute-0 systemd[1]: Started libcrun container.
Nov 29 08:17:04 compute-0 systemd[1]: libpod-d4a9be8cb43979961b1e87e7f3e0d6905eb8c9a0bb29d35e2dfe03c2ac9375f6.scope: Deactivated successfully.
Nov 29 08:17:04 compute-0 systemd[1]: libpod-d4a9be8cb43979961b1e87e7f3e0d6905eb8c9a0bb29d35e2dfe03c2ac9375f6.scope: Consumed 1.005s CPU time.
Nov 29 08:17:04 compute-0 systemd[1]: var-lib-containers-storage-overlay-bc80bc3e197dad729721c53f1b41d1a7516a3adf67ba9cdfa41e0969fe84d28c-merged.mount: Deactivated successfully.
Nov 29 08:17:04 compute-0 systemd[1]: libpod-conmon-d4a9be8cb43979961b1e87e7f3e0d6905eb8c9a0bb29d35e2dfe03c2ac9375f6.scope: Deactivated successfully.

● machine.slice - Virtual Machine and Container Slice
     Loaded: loaded (/usr/lib/systemd/system/machine.slice; static)
     Active: active since Sat 2025-11-29 07:11:10 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:11:10 UTC; 1h 6min ago
       Docs: man:systemd.special(7)
         IO: 381.9M read, 131.0M written
      Tasks: 54
     Memory: 1020.8M (peak: 1.6G)
        CPU: 15min 7.854s
     CGroup: /machine.slice
             ├─libpod-23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8.scope
             │ └─container
             │   ├─153385 dumb-init --single-child -- kolla_start
             │   └─153388 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
             ├─libpod-53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f.scope
             │ └─container
             │   ├─238043 dumb-init --single-child -- kolla_start
             │   └─238046 /usr/sbin/multipathd -d
             ├─libpod-8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e.scope
             │ └─container
             │   ├─163635 dumb-init --single-child -- kolla_start
             │   ├─163655 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │   ├─164115 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │   ├─164178 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpthalsdm8/privsep.sock
             │   ├─266092 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpv5pj6f28/privsep.sock
             │   └─266358 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmp_ylguf3k/privsep.sock
             └─libpod-c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809.scope
               └─container
                 ├─256731 dumb-init --single-child -- kolla_start
                 ├─256736 /usr/bin/python3 /usr/bin/nova-compute
                 ├─265746 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmp4yqe7f_k/privsep.sock
                 ├─266745 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmp2b17b584/privsep.sock
                 └─266829 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmparx_pv8r/privsep.sock

Nov 29 08:17:04 compute-0 magical_shockley[316220]:         "type": "bluestore"
Nov 29 08:17:04 compute-0 magical_shockley[316220]:     },
Nov 29 08:17:04 compute-0 magical_shockley[316220]:     "8cd0a453-4c8d-429b-b547-2404357db43c": {
Nov 29 08:17:04 compute-0 magical_shockley[316220]:         "ceph_fsid": "14ff1f30-5059-58f1-9a23-69871bb275a1",
Nov 29 08:17:04 compute-0 magical_shockley[316220]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Nov 29 08:17:04 compute-0 magical_shockley[316220]:         "osd_id": 0,
Nov 29 08:17:04 compute-0 magical_shockley[316220]:         "osd_uuid": "8cd0a453-4c8d-429b-b547-2404357db43c",
Nov 29 08:17:04 compute-0 magical_shockley[316220]:         "type": "bluestore"
Nov 29 08:17:04 compute-0 magical_shockley[316220]:     }
Nov 29 08:17:04 compute-0 magical_shockley[316220]: }

● system-ceph\x2d14ff1f30\x2d5059\x2d58f1\x2d9a23\x2d69871bb275a1.slice - Slice /system/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1
     Loaded: loaded
     Active: active since Sat 2025-11-29 07:11:13 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:11:13 UTC; 1h 6min ago
         IO: 1.6G read, 25.6G written
      Tasks: 992
     Memory: 3.4G (peak: 4.4G)
        CPU: 5min 51.902s
     CGroup: /system.slice/system-ceph\x2d14ff1f30\x2d5059\x2d58f1\x2d9a23\x2d69871bb275a1.slice
             ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@crash.compute-0.service
             │ ├─libpod-payload-7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230
             │ │ ├─82993 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ └─82995 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ └─runtime
             │   └─82991 /usr/bin/conmon --api-version 1 -c 7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230 -u 7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230/userdata -p /run/containers/storage/overlay-containers/7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230
             ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mds.cephfs.compute-0.bdhrqf.service
             │ ├─libpod-payload-f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511
             │ │ ├─102314 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.bdhrqf -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─102316 /usr/bin/ceph-mds -n mds.cephfs.compute-0.bdhrqf -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─102312 /usr/bin/conmon --api-version 1 -c f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511 -u f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511/userdata -p /run/containers/storage/overlay-containers/f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-mds-cephfs-compute-0-bdhrqf --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mds.cephfs.compute-0.bdhrqf.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511
             ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mgr.compute-0.kzdpag.service
             │ ├─libpod-payload-cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea
             │ │ ├─75343 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.kzdpag -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─75345 /usr/bin/ceph-mgr -n mgr.compute-0.kzdpag -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─75341 /usr/bin/conmon --api-version 1 -c cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea -u cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea/userdata -p /run/containers/storage/overlay-containers/cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-mgr-compute-0-kzdpag --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mgr.compute-0.kzdpag.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea
             ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mon.compute-0.service
             │ ├─libpod-payload-21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90
             │ │ ├─75048 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─75050 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─runtime
             │   └─75046 /usr/bin/conmon --api-version 1 -c 21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90 -u 21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90/userdata -p /run/containers/storage/overlay-containers/21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90
             ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.0.service
             │ ├─libpod-payload-9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96
             │ │ ├─88829 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─88831 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─88827 /usr/bin/conmon --api-version 1 -c 9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96 -u 9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96/userdata -p /run/containers/storage/overlay-containers/9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96
             ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.1.service
             │ ├─libpod-payload-6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11
             │ │ ├─89838 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─89840 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─89836 /usr/bin/conmon --api-version 1 -c 6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11 -u 6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11/userdata -p /run/containers/storage/overlay-containers/6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11
             ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.2.service
             │ ├─libpod-payload-2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23
             │ │ ├─91078 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─91083 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─91076 /usr/bin/conmon --api-version 1 -c 2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23 -u 2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23/userdata -p /run/containers/storage/overlay-containers/2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23
             └─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@rgw.rgw.compute-0.qxekyl.service
               ├─libpod-payload-4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf
               │ ├─99621 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.qxekyl -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
               │ └─99623 /usr/bin/radosgw -n client.rgw.rgw.compute-0.qxekyl -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
               └─runtime
                 └─99619 /usr/bin/conmon --api-version 1 -c 4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf -u 4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf/userdata -p /run/containers/storage/overlay-containers/4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-rgw-rgw-compute-0-qxekyl --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@rgw.rgw.compute-0.qxekyl.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf

Nov 29 08:17:15 compute-0 ceph-mgr[75345]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Nov 29 08:17:15 compute-0 ceph-mgr[75345]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Nov 29 08:17:15 compute-0 ceph-mgr[75345]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Nov 29 08:17:15 compute-0 ceph-mgr[75345]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Nov 29 08:17:15 compute-0 ceph-mgr[75345]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Nov 29 08:17:15 compute-0 ceph-mgr[75345]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Nov 29 08:17:15 compute-0 ceph-mgr[75345]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Nov 29 08:17:15 compute-0 ceph-mgr[75345]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Nov 29 08:17:16 compute-0 ceph-mon[75050]: pgmap v2463: 305 pgs: 305 active+clean; 271 MiB data, 672 MiB used, 59 GiB / 60 GiB avail
Nov 29 08:17:17 compute-0 ceph-mgr[75345]: log_channel(cluster) log [DBG] : pgmap v2464: 305 pgs: 305 active+clean; 271 MiB data, 672 MiB used, 59 GiB / 60 GiB avail

● system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice - Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged
     Loaded: loaded
     Active: active since Sat 2025-11-29 07:31:59 UTC; 45min ago
      Until: Sat 2025-11-29 07:31:59 UTC; 45min ago
         IO: 4.0K read, 0B written
      Tasks: 0
     Memory: 4.0K (peak: 57.9M)
        CPU: 1.001s
     CGroup: /system.slice/system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice

Nov 29 07:31:59 compute-0 systemd[1]: Created slice Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged.

● system-getty.slice - Slice /system/getty
     Loaded: loaded
     Active: active since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 296.0K (peak: 776.0K)
        CPU: 11ms
     CGroup: /system.slice/system-getty.slice
             └─getty@tty1.service
               └─1013 /sbin/agetty -o "-p -- \\u" --noclear - linux

● system-modprobe.slice - Slice /system/modprobe
     Loaded: loaded
     Active: active since Sat 2025-11-29 06:28:08 UTC; 1h 49min ago
      Until: Sat 2025-11-29 06:28:08 UTC; 1h 49min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 52.0K (peak: 11.8M)
        CPU: 147ms
     CGroup: /system.slice/system-modprobe.slice

Nov 29 06:28:08 localhost systemd[1]: Created slice Slice /system/modprobe.

● system-serial\x2dgetty.slice - Slice /system/serial-getty
     Loaded: loaded
     Active: active since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 256.0K (peak: 500.0K)
        CPU: 7ms
     CGroup: /system.slice/system-serial\x2dgetty.slice
             └─serial-getty@ttyS0.service
               └─1014 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

● system-sshd\x2dkeygen.slice - Slice /system/sshd-keygen
     Loaded: loaded
     Active: active since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 0B (peak: 0B)
        CPU: 0
     CGroup: /system.slice/system-sshd\x2dkeygen.slice

● system-systemd\x2dcoredump.slice - Slice /system/systemd-coredump
     Loaded: loaded
     Active: active since Sat 2025-11-29 07:47:52 UTC; 29min ago
      Until: Sat 2025-11-29 07:47:52 UTC; 29min ago
         IO: 5.4M read, 1.0M written
      Tasks: 0
     Memory: 5.5M (peak: 296.7M)
        CPU: 1.436s
     CGroup: /system.slice/system-systemd\x2dcoredump.slice

Nov 29 07:47:52 compute-0 systemd[1]: Created slice Slice /system/systemd-coredump.
Nov 29 07:47:54 compute-0 systemd-coredump[266851]: Process 266831 (qemu-img) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 266841:
                                                    #0  0x00007f8ef59a703c __pthread_kill_implementation (libc.so.6 + 0x8d03c)
                                                    #1  0x00007f8ef5959b86 raise (libc.so.6 + 0x3fb86)
                                                    #2  0x00007f8ef5943873 abort (libc.so.6 + 0x29873)
                                                    #3  0x00005648e4ddd5df ___interceptor_pthread_create (qemu-img + 0x4f5df)
                                                    #4  0x00007f8ef2b7dff4 _ZN6Thread10try_createEm (libceph-common.so.2 + 0x258ff4)
                                                    #5  0x00007f8ef2b806ae _ZN6Thread6createEPKcm (libceph-common.so.2 + 0x25b6ae)
                                                    #6  0x00007f8ef3a8726b _ZNSt8_Rb_treeISt4pairINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESt10type_indexES0_IKS8_N4ceph12immobile_anyILm576EEEESt10_Select1stISD_ENSA_6common11CephContext19associated_objs_cmpESaISD_EE22_M_emplace_hint_uniqueIJRKSt21piecewise_construct_tSt5tupleIJRSt17basic_string_viewIcS4_ERS7_EESP_IJRKSt15in_place_type_tIN6librbd21TaskFinisherSingletonEERPSH_EEEEESt17_Rb_tree_iteratorISD_ESt23_Rb_tree_const_iteratorISD_EDpOT_.constprop.0 (librbd.so.1 + 0x51126b)
                                                    #7  0x00007f8ef36b47a6 _ZN6librbd8ImageCtx4initEv (librbd.so.1 + 0x13e7a6)
                                                    #8  0x00007f8ef378e2d3 _ZN6librbd5image11OpenRequestINS_8ImageCtxEE12send_refreshEv (librbd.so.1 + 0x2182d3)
                                                    #9  0x00007f8ef378ef46 _ZN6librbd5image11OpenRequestINS_8ImageCtxEE23handle_v2_get_data_poolEPi (librbd.so.1 + 0x218f46)
                                                    #10 0x00007f8ef378f2a7 _ZN6librbd4util6detail20rados_state_callbackINS_5image11OpenRequestINS_8ImageCtxEEEXadL_ZNS6_23handle_v2_get_data_poolEPiEELb1EEEvPvS8_ (librbd.so.1 + 0x2192a7)
                                                    #11 0x00007f8ef348d0ac _ZN5boost4asio6detail18completion_handlerINS1_7binder0IN8librados14CB_AioCompleteEEENS0_10io_context19basic_executor_typeISaIvELm0EEEE11do_completeEPvPNS1_19scheduler_operationERKNS_6system10error_codeEm (librados.so.2 + 0xad0ac)
                                                    #12 0x00007f8ef348c585 _ZN5boost4asio6detail14strand_service11do_completeEPvPNS1_19scheduler_operationERKNS_6system10error_codeEm (librados.so.2 + 0xac585)
                                                    #13 0x00007f8ef3507498 _ZN5boost4asio6detail9scheduler3runERNS_6system10error_codeE.constprop.0.isra.0 (librados.so.2 + 0x127498)
                                                    #14 0x00007f8ef34a64e4 _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJZ17make_named_threadIZN4ceph5async15io_context_pool5startEsEUlvE_JEES_St17basic_string_viewIcSt11char_traitsIcEEOT_DpOT0_EUlSD_SG_E_S7_EEEEE6_M_runEv (librados.so.2 + 0xc64e4)
                                                    #15 0x00007f8ef2214ae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #16 0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #17 0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266833:
                                                    #0  0x00007f8ef59a238a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f8ef59a48e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f8ef220e6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f8ef2d900a2 _ZN4ceph7logging3Log5entryEv (libceph-common.so.2 + 0x46b0a2)
                                                    #4  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #5  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266831:
                                                    #0  0x00007f8ef59a238a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f8ef59a48e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f8ef220e6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f8ef36bbeb3 _ZN6librbd10ImageStateINS_8ImageCtxEE4openEm (librbd.so.1 + 0x145eb3)
                                                    #4  0x00007f8ef368bfcb rbd_open (librbd.so.1 + 0x115fcb)
                                                    #5  0x00007f8ef3c3689d qemu_rbd_open (block-rbd.so + 0x489d)
                                                    #6  0x00005648e4dee25c bdrv_open_driver.llvm.1535778247189356743 (qemu-img + 0x6025c)
                                                    #7  0x00005648e4df34b7 bdrv_open_inherit.llvm.1535778247189356743 (qemu-img + 0x654b7)
                                                    #8  0x00005648e4e00de1 bdrv_open_child_bs.llvm.1535778247189356743 (qemu-img + 0x72de1)
                                                    #9  0x00005648e4df2c36 bdrv_open_inherit.llvm.1535778247189356743 (qemu-img + 0x64c36)
                                                    #10 0x00005648e4e224b3 blk_new_open (qemu-img + 0x944b3)
                                                    #11 0x00005648e4ee2516 img_open_file (qemu-img + 0x154516)
                                                    #12 0x00005648e4ee20c0 img_open (qemu-img + 0x1540c0)
                                                    #13 0x00005648e4ede03b img_info (qemu-img + 0x15003b)
                                                    #14 0x00005648e4ed76ca main (qemu-img + 0x1496ca)
                                                    #15 0x00007f8ef5944610 __libc_start_call_main (libc.so.6 + 0x2a610)
                                                    #16 0x00007f8ef59446c0 __libc_start_main@@GLIBC_2.34 (libc.so.6 + 0x2a6c0)
                                                    #17 0x00005648e4ddd285 _start (qemu-img + 0x4f285)
                                                    
                                                    Stack trace of thread 266832:
                                                    #0  0x00007f8ef5a2282d syscall (libc.so.6 + 0x10882d)
                                                    #1  0x00005648e4f68193 qemu_event_wait (qemu-img + 0x1da193)
                                                    #2  0x00005648e4f732e7 call_rcu_thread (qemu-img + 0x1e52e7)
                                                    #3  0x00005648e4f662aa qemu_thread_start.llvm.12875871551448449403 (qemu-img + 0x1d82aa)
                                                    #4  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #5  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266840:
                                                    #0  0x00007f8ef59a238a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f8ef59a4cc0 pthread_cond_clockwait@GLIBC_2.30 (libc.so.6 + 0x8acc0)
                                                    #2  0x00007f8ef2b9e150 _ZN4ceph6common24CephContextServiceThread5entryEv (libceph-common.so.2 + 0x279150)
                                                    #3  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #4  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266835:
                                                    #0  0x00007f8ef5a29a3e epoll_wait (libc.so.6 + 0x10fa3e)
                                                    #1  0x00007f8ef2d65618 _ZN11EpollDriver10event_waitERSt6vectorI14FiredFileEventSaIS1_EEP7timeval (libceph-common.so.2 + 0x440618)
                                                    #2  0x00007f8ef2d63702 _ZN11EventCenter14process_eventsEjPNSt6chrono8durationImSt5ratioILl1ELl1000000000EEEE (libceph-common.so.2 + 0x43e702)
                                                    #3  0x00007f8ef2d642c6 _ZNSt17_Function_handlerIFvvEZN12NetworkStack10add_threadEP6WorkerEUlvE_E9_M_invokeERKSt9_Any_data (libceph-common.so.2 + 0x43f2c6)
                                                    #4  0x00007f8ef2214ae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #5  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266847:
                                                    #0  0x00007f8ef59a238a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f8ef59a48e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f8ef220e6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f8ef2b837f8 _ZN15CommonSafeTimerISt5mutexE12timer_threadEv (libceph-common.so.2 + 0x25e7f8)
                                                    #4  0x00007f8ef2b83f81 _ZN21CommonSafeTimerThreadISt5mutexE5entryEv (libceph-common.so.2 + 0x25ef81)
                                                    #5  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266848:
                                                    #0  0x00007f8ef59a238a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f8ef59a48e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f8ef220e6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f8ef2b837f8 _ZN15CommonSafeTimerISt5mutexE12timer_threadEv (libceph-common.so.2 + 0x25e7f8)
                                                    #4  0x00007f8ef2b83f81 _ZN21CommonSafeTimerThreadISt5mutexE5entryEv (libceph-common.so.2 + 0x25ef81)
                                                    #5  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266836:
                                                    #0  0x00007f8ef5a29a3e epoll_wait (libc.so.6 + 0x10fa3e)
                                                    #1  0x00007f8ef2d65618 _ZN11EpollDriver10event_waitERSt6vectorI14FiredFileEventSaIS1_EEP7timeval (libceph-common.so.2 + 0x440618)
                                                    #2  0x00007f8ef2d63702 _ZN11EventCenter14process_eventsEjPNSt6chrono8durationImSt5ratioILl1ELl1000000000EEEE (libceph-common.so.2 + 0x43e702)
                                                    #3  0x00007f8ef2d642c6 _ZNSt17_Function_handlerIFvvEZN12NetworkStack10add_threadEP6WorkerEUlvE_E9_M_invokeERKSt9_Any_data (libceph-common.so.2 + 0x43f2c6)
                                                    #4  0x00007f8ef2214ae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #5  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266845:
                                                    #0  0x00007f8ef59a238a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f8ef59a48e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f8ef220e6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f8ef2c8c0b9 _ZN13DispatchQueue18run_local_deliveryEv (libceph-common.so.2 + 0x3670b9)
                                                    #4  0x00007f8ef2d1d431 _ZN13DispatchQueue19LocalDeliveryThread5entryEv (libceph-common.so.2 + 0x3f8431)
                                                    #5  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266842:
                                                    #0  0x00007f8ef59a238a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f8ef59a48e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f8ef3507266 _ZN5boost4asio6detail9scheduler3runERNS_6system10error_codeE.constprop.0.isra.0 (librados.so.2 + 0x127266)
                                                    #3  0x00007f8ef34a64e4 _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJZ17make_named_threadIZN4ceph5async15io_context_pool5startEsEUlvE_JEES_St17basic_string_viewIcSt11char_traitsIcEEOT_DpOT0_EUlSD_SG_E_S7_EEEEE6_M_runEv (librados.so.2 + 0xc64e4)
                                                    #4  0x00007f8ef2214ae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #5  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266844:
                                                    #0  0x00007f8ef59a238a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f8ef59a48e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f8ef220e6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f8ef2c8c49f _ZN13DispatchQueue5entryEv (libceph-common.so.2 + 0x36749f)
                                                    #4  0x00007f8ef2d1d411 _ZN13DispatchQueue14DispatchThread5entryEv (libceph-common.so.2 + 0x3f8411)
                                                    #5  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266846:
                                                    #0  0x00007f8ef59a238a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f8ef59a4cc0 pthread_cond_clockwait@GLIBC_2.30 (libc.so.6 + 0x8acc0)
                                                    #2  0x00007f8ef2b83b23 _ZN15CommonSafeTimerISt5mutexE12timer_threadEv (libceph-common.so.2 + 0x25eb23)
                                                    #3  0x00007f8ef2b83f81 _ZN21CommonSafeTimerThreadISt5mutexE5entryEv (libceph-common.so.2 + 0x25ef81)
                                                    #4  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #5  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266849:
                                                    #0  0x00007f8ef59a238a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f8ef59a48e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f8ef220e6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f8ef2b837f8 _ZN15CommonSafeTimerISt5mutexE12timer_threadEv (libceph-common.so.2 + 0x25e7f8)
                                                    #4  0x00007f8ef2b83f81 _ZN21CommonSafeTimerThreadISt5mutexE5entryEv (libceph-common.so.2 + 0x25ef81)
                                                    #5  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266843:
                                                    #0  0x00007f8ef59a238a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f8ef59a4cc0 pthread_cond_clockwait@GLIBC_2.30 (libc.so.6 + 0x8acc0)
                                                    #2  0x00007f8ef34df364 _ZN4ceph5timerINS_17coarse_mono_clockEE12timer_threadEv (librados.so.2 + 0xff364)
                                                    #3  0x00007f8ef2214ae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #4  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #5  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 266834:
                                                    #0  0x00007f8ef5a29a3e epoll_wait (libc.so.6 + 0x10fa3e)
                                                    #1  0x00007f8ef2d65618 _ZN11EpollDriver10event_waitERSt6vectorI14FiredFileEventSaIS1_EEP7timeval (libceph-common.so.2 + 0x440618)
                                                    #2  0x00007f8ef2d63702 _ZN11EventCenter14process_eventsEjPNSt6chrono8durationImSt5ratioILl1ELl1000000000EEEE (libceph-common.so.2 + 0x43e702)
                                                    #3  0x00007f8ef2d642c6 _ZNSt17_Function_handlerIFvvEZN12NetworkStack10add_threadEP6WorkerEUlvE_E9_M_invokeERKSt9_Any_data (libceph-common.so.2 + 0x43f2c6)
                                                    #4  0x00007f8ef2214ae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #5  0x00007f8ef59a52fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f8ef5a2a400 __clone3 (libc.so.6 + 0x110400)
                                                    ELF object binary architecture: AMD x86-64

● system.slice - System Slice
     Loaded: loaded
     Active: active since Sat 2025-11-29 06:28:07 UTC; 1h 49min ago
      Until: Sat 2025-11-29 06:28:07 UTC; 1h 49min ago
       Docs: man:systemd.special(7)
         IO: 1.8G read, 25.7G written
      Tasks: 1116
     Memory: 4.1G (peak: 5.0G)
        CPU: 12min 44.730s
     CGroup: /system.slice
             ├─NetworkManager.service
             │ └─48962 /usr/sbin/NetworkManager --no-daemon
             ├─auditd.service
             │ ├─704 /sbin/auditd
             │ └─706 /usr/sbin/sedispatch
             ├─chronyd.service
             │ └─58530 /usr/sbin/chronyd -F 2
             ├─crond.service
             │ └─1011 /usr/sbin/crond -n
             ├─dbus-broker.service
             │ ├─774 /usr/bin/dbus-broker-launch --scope system --audit
             │ └─781 dbus-broker --log 4 --controller 9 --machine-id 1f988c78c563e12389ab342aced42dbb --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             ├─edpm_multipathd.service
             │ └─238041 /usr/bin/conmon --api-version 1 -c 53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f -u 53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f/userdata -p /run/containers/storage/overlay-containers/53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f/userdata/pidfile -n multipathd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f/userdata/oci-log --conmon-pidfile /run/multipathd.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f
             ├─edpm_nova_compute.service
             │ └─256729 /usr/bin/conmon --api-version 1 -c c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809 -u c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809/userdata -p /run/containers/storage/overlay-containers/c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg c15969223c00c2b30a6778fcaf267330fb4765e47f851bc1febe18461e097809
             ├─edpm_ovn_controller.service
             │ └─153383 /usr/bin/conmon --api-version 1 -c 23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8 -u 23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8/userdata -p /run/containers/storage/overlay-containers/23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8
             ├─edpm_ovn_metadata_agent.service
             │ └─163632 /usr/bin/conmon --api-version 1 -c 8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e -u 8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e/userdata -p /run/containers/storage/overlay-containers/8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e
             ├─gssproxy.service
             │ └─875 /usr/sbin/gssproxy -D
             ├─irqbalance.service
             │ └─804 /usr/sbin/irqbalance
             ├─iscsid.service
             │ └─240752 /usr/sbin/iscsid -f
             ├─ovs-vswitchd.service
             │ └─47265 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             ├─ovsdb-server.service
             │ └─47183 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             ├─polkit.service
             │ └─43449 /usr/lib/polkit-1/polkitd --no-debug
             ├─rpcbind.service
             │ └─702 /usr/bin/rpcbind -w -f
             ├─rsyslog.service
             │ └─1007 /usr/sbin/rsyslogd -n
             ├─sshd.service
             │ └─191978 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             ├─system-ceph\x2d14ff1f30\x2d5059\x2d58f1\x2d9a23\x2d69871bb275a1.slice
             │ ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@crash.compute-0.service
             │ │ ├─libpod-payload-7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230
             │ │ │ ├─82993 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ └─82995 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ └─runtime
             │ │   └─82991 /usr/bin/conmon --api-version 1 -c 7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230 -u 7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230/userdata -p /run/containers/storage/overlay-containers/7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7eb39cf0035c22ff4e83a7371bb415c7d467398eea843a964591e85500be2230
             │ ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mds.cephfs.compute-0.bdhrqf.service
             │ │ ├─libpod-payload-f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511
             │ │ │ ├─102314 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.bdhrqf -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─102316 /usr/bin/ceph-mds -n mds.cephfs.compute-0.bdhrqf -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─102312 /usr/bin/conmon --api-version 1 -c f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511 -u f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511/userdata -p /run/containers/storage/overlay-containers/f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-mds-cephfs-compute-0-bdhrqf --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mds.cephfs.compute-0.bdhrqf.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg f32e02240e8a83659f9e6ca6aa769bfab0c78f2322374b714daa8a8d1689a511
             │ ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mgr.compute-0.kzdpag.service
             │ │ ├─libpod-payload-cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea
             │ │ │ ├─75343 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.kzdpag -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─75345 /usr/bin/ceph-mgr -n mgr.compute-0.kzdpag -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─75341 /usr/bin/conmon --api-version 1 -c cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea -u cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea/userdata -p /run/containers/storage/overlay-containers/cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-mgr-compute-0-kzdpag --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mgr.compute-0.kzdpag.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg cf5b754473e023a6c38808405cba62d535fd2d4dd4ad3580b8df218d673d96ea
             │ ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mon.compute-0.service
             │ │ ├─libpod-payload-21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90
             │ │ │ ├─75048 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─75050 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─runtime
             │ │   └─75046 /usr/bin/conmon --api-version 1 -c 21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90 -u 21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90/userdata -p /run/containers/storage/overlay-containers/21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 21a56ae912cb8d8d1f0dc09cd0d64941e849dd5a597340fef403575f5f6dca90
             │ ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.0.service
             │ │ ├─libpod-payload-9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96
             │ │ │ ├─88829 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─88831 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─88827 /usr/bin/conmon --api-version 1 -c 9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96 -u 9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96/userdata -p /run/containers/storage/overlay-containers/9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 9e203bb2012357f685d3a92116fa2f82a2b0bf3d53f620d86c98c827de1eec96
             │ ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.1.service
             │ │ ├─libpod-payload-6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11
             │ │ │ ├─89838 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─89840 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─89836 /usr/bin/conmon --api-version 1 -c 6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11 -u 6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11/userdata -p /run/containers/storage/overlay-containers/6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 6a5fc11573d1a39f1563ae47c276c7e603d4b25acd108fa32dcccaad74ad1d11
             │ ├─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.2.service
             │ │ ├─libpod-payload-2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23
             │ │ │ ├─91078 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─91083 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─91076 /usr/bin/conmon --api-version 1 -c 2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23 -u 2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23/userdata -p /run/containers/storage/overlay-containers/2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2e6c1ee4769aea5e614a2b0f0b65dd1997bbdea3f16ea1e0ab78d05fbef53f23
             │ └─ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@rgw.rgw.compute-0.qxekyl.service
             │   ├─libpod-payload-4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf
             │   │ ├─99621 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.qxekyl -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │   │ └─99623 /usr/bin/radosgw -n client.rgw.rgw.compute-0.qxekyl -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │   └─runtime
             │     └─99619 /usr/bin/conmon --api-version 1 -c 4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf -u 4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf/userdata -p /run/containers/storage/overlay-containers/4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf/userdata/pidfile -n ceph-14ff1f30-5059-58f1-9a23-69871bb275a1-rgw-rgw-compute-0-qxekyl --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf/userdata/oci-log --conmon-pidfile /run/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1@rgw.rgw.compute-0.qxekyl.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4edd4e145deee38b7ccf448ee661159c19a1b05ba5a7fb95dd92a6fb13e29fbf
             ├─system-getty.slice
             │ └─getty@tty1.service
             │   └─1013 /sbin/agetty -o "-p -- \\u" --noclear - linux
             ├─system-serial\x2dgetty.slice
             │ └─serial-getty@ttyS0.service
             │   └─1014 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             ├─systemd-hostnamed.service
             │ └─314855 /usr/lib/systemd/systemd-hostnamed
             ├─systemd-journald.service
             │ └─681 /usr/lib/systemd/systemd-journald
             ├─systemd-logind.service
             │ └─807 /usr/lib/systemd/systemd-logind
             ├─systemd-machined.service
             │ └─217781 /usr/lib/systemd/systemd-machined
             ├─systemd-udevd.service
             │ └─udev
             │   └─733 /usr/lib/systemd/systemd-udevd
             ├─tuned.service
             │ └─113121 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             ├─virtlogd.service
             │ └─217150 /usr/sbin/virtlogd
             ├─virtnodedevd.service
             │ └─256467 /usr/sbin/virtnodedevd --timeout 120
             ├─virtqemud.service
             │ └─256259 /usr/sbin/virtqemud --timeout 120
             └─virtsecretd.service
               └─266034 /usr/sbin/virtsecretd --timeout 120

Nov 29 08:17:14 compute-0 nova_compute[256729]: 2025-11-29 08:17:14.870 256736 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Nov 29 08:17:15 compute-0 nova_compute[256729]: 2025-11-29 08:17:15.148 256736 DEBUG oslo_service.periodic_task [None req-27de958c-c2ab-4243-8865-e92c1e69ab3e - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Nov 29 08:17:16 compute-0 virtqemud[256259]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory
Nov 29 08:17:16 compute-0 nova_compute[256729]: 2025-11-29 08:17:16.149 256736 DEBUG oslo_service.periodic_task [None req-27de958c-c2ab-4243-8865-e92c1e69ab3e - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Nov 29 08:17:17 compute-0 nova_compute[256729]: 2025-11-29 08:17:17.148 256736 DEBUG oslo_service.periodic_task [None req-27de958c-c2ab-4243-8865-e92c1e69ab3e - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Nov 29 08:17:17 compute-0 nova_compute[256729]: 2025-11-29 08:17:17.149 256736 DEBUG nova.compute.manager [None req-27de958c-c2ab-4243-8865-e92c1e69ab3e - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858[00m
Nov 29 08:17:17 compute-0 nova_compute[256729]: 2025-11-29 08:17:17.149 256736 DEBUG nova.compute.manager [None req-27de958c-c2ab-4243-8865-e92c1e69ab3e - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862[00m
Nov 29 08:17:17 compute-0 nova_compute[256729]: 2025-11-29 08:17:17.172 256736 DEBUG nova.compute.manager [None req-27de958c-c2ab-4243-8865-e92c1e69ab3e - - - - - -] Didn't find any instances for network info cache update. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9944[00m
Nov 29 08:17:17 compute-0 nova_compute[256729]: 2025-11-29 08:17:17.172 256736 DEBUG oslo_service.periodic_task [None req-27de958c-c2ab-4243-8865-e92c1e69ab3e - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Nov 29 08:17:17 compute-0 nova_compute[256729]: 2025-11-29 08:17:17.172 256736 DEBUG nova.compute.manager [None req-27de958c-c2ab-4243-8865-e92c1e69ab3e - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477[00m

● user-1000.slice - User Slice of UID 1000
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Sat 2025-11-29 06:30:09 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:30:09 UTC; 1h 47min ago
       Docs: man:user@.service(5)
         IO: 685.5M read, 8.2G written
      Tasks: 21 (limit: 20036)
     Memory: 1.4G (peak: 4.1G)
        CPU: 22min 59.096s
     CGroup: /user.slice/user-1000.slice
             ├─session-1.scope
             │ └─4513 /usr/bin/python3
             ├─session-51.scope
             │ ├─311291 "sshd-session: zuul [priv]"
             │ ├─311294 "sshd-session: zuul@notty"
             │ ├─311295 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ ├─311319 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ ├─318244 timeout 15s turbostat --debug sleep 10
             │ ├─318920 timeout 300s systemctl status --all
             │ ├─318922 systemctl status --all
             │ ├─319004 timeout --foreground 300s virsh -r nodedev-dumpxml scsi_host0
             │ └─319005 virsh -r nodedev-dumpxml scsi_host0
             └─user@1000.service
               ├─app.slice
               │ └─dbus-broker.service
               │   ├─14284 /usr/bin/dbus-broker-launch --scope user
               │   └─14294 dbus-broker --log 4 --controller 9 --machine-id 1f988c78c563e12389ab342aced42dbb --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               ├─init.scope
               │ ├─4301 /usr/lib/systemd/systemd --user
               │ └─4303 "(sd-pam)"
               └─user.slice
                 └─podman-pause-aee1f4af.scope
                   └─14221 catatonit -P

Nov 29 07:38:39 compute-0 python3.9[256973]: ansible-containers.podman.podman_container PODMAN-CONTAINER-DEBUG: podman start nova_compute_init
Nov 29 07:38:39 compute-0 podman[257067]: 2025-11-29 07:38:39.572758423 +0000 UTC m=+0.052082204 container died 9d12469c89efdfdc666ec174c176d4bb33ba378d5869ad0c5c6d9b408e5fac58 (image=quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified, name=nova_compute_init, config_id=edpm, org.label-schema.build-date=20251125, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=1f5c0439f2433cb462b222a5bb23e629, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified', 'privileged': False, 'user': 'root', 'restart': 'never', 'command': 'bash -c $* -- eval python3 /sbin/nova_statedir_ownership.py | logger -t nova_compute_init', 'net': 'none', 'security_opt': ['label=disable'], 'detach': False, 'environment': {'NOVA_STATEDIR_OWNERSHIP_SKIP': '/var/lib/nova/compute_id', '__OS_DEBUG': False}, 'volumes': ['/dev/log:/dev/log', '/var/lib/nova:/var/lib/nova:shared', '/var/lib/_nova_secontext:/var/lib/_nova_secontext:shared,z', '/var/lib/openstack/config/nova/nova_statedir_ownership.py:/sbin/nova_statedir_ownership.py:z']}, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, container_name=nova_compute_init)
Nov 29 07:38:39 compute-0 sudo[256968]: pam_unix(sudo:session): session closed for user root
Nov 29 07:38:40 compute-0 sshd-session[225540]: Connection closed by 192.168.122.30 port 58076
Nov 29 07:38:40 compute-0 sshd-session[225537]: pam_unix(sshd:session): session closed for user zuul
Nov 29 08:16:26 compute-0 sudo[311295]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Nov 29 08:16:26 compute-0 sudo[311295]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Nov 29 08:16:36 compute-0 ovs-vsctl[311624]: ovs|00001|db_ctl_base|ERR|no key "dpdk-init" in Open_vSwitch record "." column other_config
Nov 29 08:17:07 compute-0 ovs-appctl[317380]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Nov 29 08:17:07 compute-0 ovs-appctl[317386]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

● user-42477.slice - User Slice of UID 42477
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Sat 2025-11-29 07:12:11 UTC; 1h 5min ago
      Until: Sat 2025-11-29 07:12:11 UTC; 1h 5min ago
       Docs: man:user@.service(5)
         IO: 9.1M read, 174.0M written
      Tasks: 26 (limit: 20036)
     Memory: 36.9M (peak: 77.1M)
        CPU: 5min 42ms
     CGroup: /user.slice/user-42477.slice
             ├─session-20.scope
             │ ├─76613 "sshd-session: ceph-admin [priv]"
             │ └─76635 "sshd-session: ceph-admin"
             ├─session-22.scope
             │ ├─76620 "sshd-session: ceph-admin [priv]"
             │ └─76636 "sshd-session: ceph-admin@notty"
             ├─session-23.scope
             │ ├─76687 "sshd-session: ceph-admin [priv]"
             │ └─76690 "sshd-session: ceph-admin@notty"
             ├─session-24.scope
             │ ├─76741 "sshd-session: ceph-admin [priv]"
             │ └─76744 "sshd-session: ceph-admin@notty"
             ├─session-25.scope
             │ ├─76795 "sshd-session: ceph-admin [priv]"
             │ └─76798 "sshd-session: ceph-admin@notty"
             ├─session-26.scope
             │ ├─76849 "sshd-session: ceph-admin [priv]"
             │ └─76852 "sshd-session: ceph-admin@notty"
             ├─session-27.scope
             │ ├─76903 "sshd-session: ceph-admin [priv]"
             │ └─76906 "sshd-session: ceph-admin@notty"
             ├─session-28.scope
             │ ├─76957 "sshd-session: ceph-admin [priv]"
             │ └─76960 "sshd-session: ceph-admin@notty"
             ├─session-29.scope
             │ ├─77011 "sshd-session: ceph-admin [priv]"
             │ └─77014 "sshd-session: ceph-admin@notty"
             ├─session-30.scope
             │ ├─77065 "sshd-session: ceph-admin [priv]"
             │ └─77068 "sshd-session: ceph-admin@notty"
             ├─session-31.scope
             │ ├─77092 "sshd-session: ceph-admin [priv]"
             │ └─77095 "sshd-session: ceph-admin@notty"
             ├─session-32.scope
             │ ├─77146 "sshd-session: ceph-admin [priv]"
             │ └─77149 "sshd-session: ceph-admin@notty"
             └─user@42477.service
               └─init.scope
                 ├─76617 /usr/lib/systemd/systemd --user
                 └─76619 "(sd-pam)"

Nov 29 08:17:03 compute-0 podman[316184]: 2025-11-29 08:17:03.77816041 +0000 UTC m=+0.144861892 container attach d4a9be8cb43979961b1e87e7f3e0d6905eb8c9a0bb29d35e2dfe03c2ac9375f6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_shockley, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.schema-version=1.0, CEPH_REF=reef, ceph=True, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, OSD_FLAVOR=default)
Nov 29 08:17:04 compute-0 podman[316184]: 2025-11-29 08:17:04.82344633 +0000 UTC m=+1.190147822 container died d4a9be8cb43979961b1e87e7f3e0d6905eb8c9a0bb29d35e2dfe03c2ac9375f6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_shockley, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.build-date=20250507, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image)
Nov 29 08:17:04 compute-0 podman[316184]: 2025-11-29 08:17:04.878195159 +0000 UTC m=+1.244896641 container remove d4a9be8cb43979961b1e87e7f3e0d6905eb8c9a0bb29d35e2dfe03c2ac9375f6 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=magical_shockley, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=reef, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, io.buildah.version=1.39.3)
Nov 29 08:17:04 compute-0 sudo[315985]: pam_unix(sudo:session): session closed for user root
Nov 29 08:17:04 compute-0 sudo[316565]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Nov 29 08:17:05 compute-0 sudo[316565]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 29 08:17:05 compute-0 sudo[316565]: pam_unix(sudo:session): session closed for user root
Nov 29 08:17:05 compute-0 sudo[316607]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Nov 29 08:17:05 compute-0 sudo[316607]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 29 08:17:05 compute-0 sudo[316607]: pam_unix(sudo:session): session closed for user root

● user.slice - User and Session Slice
     Loaded: loaded (/usr/lib/systemd/system/user.slice; static)
     Active: active since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
       Docs: man:systemd.special(7)
         IO: 694.6M read, 8.4G written
      Tasks: 47
     Memory: 1.5G (peak: 4.1G)
        CPU: 27min 59.804s
     CGroup: /user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4513 /usr/bin/python3
             │ ├─session-51.scope
             │ │ ├─311291 "sshd-session: zuul [priv]"
             │ │ ├─311294 "sshd-session: zuul@notty"
             │ │ ├─311295 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─311319 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─318244 timeout 15s turbostat --debug sleep 10
             │ │ ├─318920 timeout 300s systemctl status --all
             │ │ └─318922 systemctl status --all
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─14284 /usr/bin/dbus-broker-launch --scope user
             │   │   └─14294 dbus-broker --log 4 --controller 9 --machine-id 1f988c78c563e12389ab342aced42dbb --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4301 /usr/lib/systemd/systemd --user
             │   │ └─4303 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-aee1f4af.scope
             │       └─14221 catatonit -P
             └─user-42477.slice
               ├─session-20.scope
               │ ├─76613 "sshd-session: ceph-admin [priv]"
               │ └─76635 "sshd-session: ceph-admin"
               ├─session-22.scope
               │ ├─76620 "sshd-session: ceph-admin [priv]"
               │ └─76636 "sshd-session: ceph-admin@notty"
               ├─session-23.scope
               │ ├─76687 "sshd-session: ceph-admin [priv]"
               │ └─76690 "sshd-session: ceph-admin@notty"
               ├─session-24.scope
               │ ├─76741 "sshd-session: ceph-admin [priv]"
               │ └─76744 "sshd-session: ceph-admin@notty"
               ├─session-25.scope
               │ ├─76795 "sshd-session: ceph-admin [priv]"
               │ └─76798 "sshd-session: ceph-admin@notty"
               ├─session-26.scope
               │ ├─76849 "sshd-session: ceph-admin [priv]"
               │ └─76852 "sshd-session: ceph-admin@notty"
               ├─session-27.scope
               │ ├─76903 "sshd-session: ceph-admin [priv]"
               │ └─76906 "sshd-session: ceph-admin@notty"
               ├─session-28.scope
               │ ├─76957 "sshd-session: ceph-admin [priv]"
               │ └─76960 "sshd-session: ceph-admin@notty"
               ├─session-29.scope
               │ ├─77011 "sshd-session: ceph-admin [priv]"
               │ └─77014 "sshd-session: ceph-admin@notty"
               ├─session-30.scope
               │ ├─77065 "sshd-session: ceph-admin [priv]"
               │ └─77068 "sshd-session: ceph-admin@notty"
               ├─session-31.scope
               │ ├─77092 "sshd-session: ceph-admin [priv]"
               │ └─77095 "sshd-session: ceph-admin@notty"
               ├─session-32.scope
               │ ├─77146 "sshd-session: ceph-admin [priv]"
               │ └─77149 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─76617 /usr/lib/systemd/systemd --user
                   └─76619 "(sd-pam)"

● dbus.socket - D-Bus System Message Bus Socket
     Loaded: loaded (/usr/lib/systemd/system/dbus.socket; enabled; preset: enabled)
     Active: active (running) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
   Triggers: ● dbus-broker.service
     Listen: /run/dbus/system_bus_socket (Stream)
     CGroup: /system.slice/dbus.socket

Nov 29 06:29:44 localhost systemd[1]: Listening on D-Bus System Message Bus Socket.

● dm-event.socket - Device-mapper event daemon FIFOs
     Loaded: loaded (/usr/lib/systemd/system/dm-event.socket; enabled; preset: enabled)
     Active: active (listening) since Sat 2025-11-29 07:02:44 UTC; 1h 14min ago
      Until: Sat 2025-11-29 07:02:44 UTC; 1h 14min ago
   Triggers: ● dm-event.service
       Docs: man:dmeventd(8)
     Listen: /run/dmeventd-server (FIFO)
             /run/dmeventd-client (FIFO)
     CGroup: /system.slice/dm-event.socket

Nov 29 07:02:44 compute-0 systemd[1]: Listening on Device-mapper event daemon FIFOs.

● iscsid.socket - Open-iSCSI iscsid Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsid.socket; enabled; preset: enabled)
     Active: active (running) since Sat 2025-11-29 07:33:25 UTC; 43min ago
      Until: Sat 2025-11-29 07:33:25 UTC; 43min ago
   Triggers: ● iscsid.service
       Docs: man:iscsid(8)
             man:iscsiadm(8)
     Listen: @ISCSIADM_ABSTRACT_NAMESPACE (Stream)
     CGroup: /system.slice/iscsid.socket

Nov 29 07:33:25 compute-0 systemd[1]: Listening on Open-iSCSI iscsid Socket.

○ iscsiuio.socket - Open-iSCSI iscsiuio Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● iscsiuio.service
       Docs: man:iscsiuio(8)
     Listen: @ISCSID_UIP_ABSTRACT_NAMESPACE (Stream)

○ libvirtd-admin.socket - libvirt legacy monolithic daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-admin.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)

○ libvirtd-ro.socket - libvirt legacy monolithic daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-ro.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)

○ libvirtd-tls.socket
     Loaded: masked (Reason: Unit libvirtd-tls.socket is masked.)
     Active: inactive (dead)

○ libvirtd.socket - libvirt legacy monolithic daemon socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock (Stream)

● lvm2-lvmpolld.socket - LVM2 poll daemon socket
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.socket; enabled; preset: enabled)
     Active: active (listening) since Sat 2025-11-29 07:02:45 UTC; 1h 14min ago
      Until: Sat 2025-11-29 07:02:45 UTC; 1h 14min ago
   Triggers: ● lvm2-lvmpolld.service
       Docs: man:lvmpolld(8)
     Listen: /run/lvm/lvmpolld.socket (Stream)
     CGroup: /system.slice/lvm2-lvmpolld.socket

Nov 29 07:02:45 compute-0 systemd[1]: Listening on LVM2 poll daemon socket.

● rpcbind.socket - RPCbind Server Activation Socket
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.socket; enabled; preset: enabled)
     Active: active (running) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
   Triggers: ● rpcbind.service
     Listen: /run/rpcbind.sock (Stream)
             0.0.0.0:111 (Stream)
             0.0.0.0:111 (Datagram)
             [::]:111 (Stream)
             [::]:111 (Datagram)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 16.0K (peak: 288.0K)
        CPU: 5ms
     CGroup: /system.slice/rpcbind.socket

● sssd-kcm.socket - SSSD Kerberos Cache Manager responder socket
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.socket; enabled; preset: enabled)
     Active: active (listening) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
   Triggers: ● sssd-kcm.service
       Docs: man:sssd-kcm(8)
     Listen: /run/.heim_org.h5l.kcm-socket (Stream)
     CGroup: /system.slice/sssd-kcm.socket

Nov 29 06:29:44 localhost systemd[1]: Listening on SSSD Kerberos Cache Manager responder socket.

○ syslog.socket - Syslog Socket
     Loaded: loaded (/usr/lib/systemd/system/syslog.socket; static)
     Active: inactive (dead)
   Triggers: ● syslog.service
       Docs: man:systemd.special(7)
             https://www.freedesktop.org/wiki/Software/systemd/syslog
     Listen: /run/systemd/journal/syslog (Datagram)

● systemd-coredump.socket - Process Core Dump Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-coredump.socket; static)
     Active: active (listening) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
       Docs: man:systemd-coredump(8)
     Listen: /run/systemd/coredump (SequentialPacket)
   Accepted: 1; Connected: 0;
     CGroup: /system.slice/systemd-coredump.socket

● systemd-initctl.socket - initctl Compatibility Named Pipe
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.socket; static)
     Active: active (listening) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
   Triggers: ● systemd-initctl.service
       Docs: man:systemd-initctl.socket(8)
     Listen: /run/initctl (FIFO)
     CGroup: /system.slice/systemd-initctl.socket

● systemd-journald-dev-log.socket - Journal Socket (/dev/log)
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald-dev-log.socket; static)
     Active: active (running) since Sat 2025-11-29 06:28:07 UTC; 1h 49min ago
      Until: Sat 2025-11-29 06:28:07 UTC; 1h 49min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/dev-log (Datagram)
     CGroup: /system.slice/systemd-journald-dev-log.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-journald.socket - Journal Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.socket; static)
     Active: active (running) since Sat 2025-11-29 06:28:07 UTC; 1h 49min ago
      Until: Sat 2025-11-29 06:28:07 UTC; 1h 49min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/socket (Datagram)
             /run/systemd/journal/stdout (Stream)
     CGroup: /system.slice/systemd-journald.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-rfkill.socket - Load/Save RF Kill Switch Status /dev/rfkill Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.socket; static)
     Active: active (listening) since Sat 2025-11-29 06:29:45 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:45 UTC; 1h 47min ago
   Triggers: ● systemd-rfkill.service
       Docs: man:systemd-rfkill.socket(8)
     Listen: /dev/rfkill (Special)
     CGroup: /system.slice/systemd-rfkill.socket

Nov 29 06:29:45 np0005539576.novalocal systemd[1]: Listening on Load/Save RF Kill Switch Status /dev/rfkill Watch.

● systemd-udevd-control.socket - udev Control Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-control.socket; static)
     Active: active (running) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-control.socket(8)
             man:udev(7)
     Listen: /run/udev/control (SequentialPacket)
     CGroup: /system.slice/systemd-udevd-control.socket

● systemd-udevd-kernel.socket - udev Kernel Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-kernel.socket; static)
     Active: active (running) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-kernel.socket(8)
             man:udev(7)
     Listen: kobject-uevent 1 (Netlink)
     CGroup: /system.slice/systemd-udevd-kernel.socket

○ virtinterfaced-admin.socket - libvirt interface daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-admin-sock (Stream)

○ virtinterfaced-ro.socket - libvirt interface daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock-ro (Stream)

○ virtinterfaced.socket - libvirt interface daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock (Stream)

○ virtlockd-admin.socket - libvirt locking daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-admin-sock (Stream)

● virtlockd.socket - libvirt locking daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.socket; enabled; preset: enabled)
     Active: active (listening) since Sat 2025-11-29 07:32:01 UTC; 45min ago
      Until: Sat 2025-11-29 07:32:01 UTC; 45min ago
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-sock (Stream)
     CGroup: /system.slice/virtlockd.socket

Nov 29 07:32:01 compute-0 systemd[1]: Listening on libvirt locking daemon socket.

● virtlogd-admin.socket - libvirt logging daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd-admin.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-11-29 07:31:57 UTC; 45min ago
      Until: Sat 2025-11-29 07:31:57 UTC; 45min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 0B (peak: 512.0K)
        CPU: 5ms
     CGroup: /system.slice/virtlogd-admin.socket

Nov 29 07:31:57 compute-0 systemd[1]: Starting libvirt logging daemon admin socket...
Nov 29 07:31:57 compute-0 systemd[1]: Listening on libvirt logging daemon admin socket.

● virtlogd.socket - libvirt logging daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-11-29 07:31:57 UTC; 45min ago
      Until: Sat 2025-11-29 07:31:57 UTC; 45min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 4.0K (peak: 512.0K)
        CPU: 6ms
     CGroup: /system.slice/virtlogd.socket

Nov 29 07:31:57 compute-0 systemd[1]: Starting libvirt logging daemon socket...
Nov 29 07:31:57 compute-0 systemd[1]: Listening on libvirt logging daemon socket.

○ virtnetworkd-admin.socket - libvirt network daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-admin-sock (Stream)

○ virtnetworkd-ro.socket - libvirt network daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock-ro (Stream)

○ virtnetworkd.socket - libvirt network daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock (Stream)

● virtnodedevd-admin.socket - libvirt nodedev daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-admin.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-11-29 07:31:59 UTC; 45min ago
      Until: Sat 2025-11-29 07:31:59 UTC; 45min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 0B (peak: 568.0K)
        CPU: 2ms
     CGroup: /system.slice/virtnodedevd-admin.socket

Nov 29 07:31:59 compute-0 systemd[1]: Starting libvirt nodedev daemon admin socket...
Nov 29 07:31:59 compute-0 systemd[1]: Listening on libvirt nodedev daemon admin socket.

● virtnodedevd-ro.socket - libvirt nodedev daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-ro.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-11-29 07:31:59 UTC; 45min ago
      Until: Sat 2025-11-29 07:31:59 UTC; 45min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 0B (peak: 512.0K)
        CPU: 4ms
     CGroup: /system.slice/virtnodedevd-ro.socket

Nov 29 07:31:59 compute-0 systemd[1]: Starting libvirt nodedev daemon read-only socket...
Nov 29 07:31:59 compute-0 systemd[1]: Listening on libvirt nodedev daemon read-only socket.

● virtnodedevd.socket - libvirt nodedev daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-11-29 07:31:59 UTC; 45min ago
      Until: Sat 2025-11-29 07:31:59 UTC; 45min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 0B (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtnodedevd.socket

Nov 29 07:31:59 compute-0 systemd[1]: Starting libvirt nodedev daemon socket...
Nov 29 07:31:59 compute-0 systemd[1]: Listening on libvirt nodedev daemon socket.

○ virtnwfilterd-admin.socket - libvirt nwfilter daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-admin-sock (Stream)

○ virtnwfilterd-ro.socket - libvirt nwfilter daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock-ro (Stream)

○ virtnwfilterd.socket - libvirt nwfilter daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock (Stream)

● virtproxyd-admin.socket - libvirt proxy daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-admin.socket.d
             └─override.conf
     Active: active (listening) since Sat 2025-11-29 07:32:00 UTC; 45min ago
      Until: Sat 2025-11-29 07:32:00 UTC; 45min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtproxyd-admin.socket

Nov 29 07:32:00 compute-0 systemd[1]: Starting libvirt proxy daemon admin socket...
Nov 29 07:32:00 compute-0 systemd[1]: Listening on libvirt proxy daemon admin socket.

● virtproxyd-ro.socket - libvirt proxy daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-ro.socket.d
             └─override.conf
     Active: active (listening) since Sat 2025-11-29 07:32:00 UTC; 45min ago
      Until: Sat 2025-11-29 07:32:00 UTC; 45min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 0B (peak: 476.0K)
        CPU: 3ms
     CGroup: /system.slice/virtproxyd-ro.socket

Nov 29 07:32:00 compute-0 systemd[1]: Starting libvirt proxy daemon read-only socket...
Nov 29 07:32:00 compute-0 systemd[1]: Listening on libvirt proxy daemon read-only socket.

● virtproxyd-tls.socket - libvirt proxy daemon TLS IP socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-tls.socket; enabled; preset: disabled)
     Active: active (listening) since Sat 2025-11-29 07:30:27 UTC; 46min ago
      Until: Sat 2025-11-29 07:30:27 UTC; 46min ago
   Triggers: ● virtproxyd.service
     Listen: [::]:16514 (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 8.0K (peak: 272.0K)
        CPU: 2ms
     CGroup: /system.slice/virtproxyd-tls.socket

Nov 29 07:30:27 compute-0 systemd[1]: Listening on libvirt proxy daemon TLS IP socket.

● virtproxyd.socket - libvirt proxy daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd.socket.d
             └─override.conf
     Active: active (listening) since Sat 2025-11-29 07:30:27 UTC; 46min ago
      Until: Sat 2025-11-29 07:30:27 UTC; 46min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock (Stream)
     CGroup: /system.slice/virtproxyd.socket

Nov 29 07:30:27 compute-0 systemd[1]: Listening on libvirt proxy daemon socket.

● virtqemud-admin.socket - libvirt QEMU daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-admin.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-11-29 07:32:01 UTC; 45min ago
      Until: Sat 2025-11-29 07:32:01 UTC; 45min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtqemud-admin.socket

Nov 29 07:32:01 compute-0 systemd[1]: Starting libvirt QEMU daemon admin socket...
Nov 29 07:32:01 compute-0 systemd[1]: Listening on libvirt QEMU daemon admin socket.

● virtqemud-ro.socket - libvirt QEMU daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-ro.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-11-29 07:32:01 UTC; 45min ago
      Until: Sat 2025-11-29 07:32:01 UTC; 45min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 0B (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtqemud-ro.socket

Nov 29 07:32:01 compute-0 systemd[1]: Starting libvirt QEMU daemon read-only socket...
Nov 29 07:32:01 compute-0 systemd[1]: Listening on libvirt QEMU daemon read-only socket.

● virtqemud.socket - libvirt QEMU daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-11-29 07:32:01 UTC; 45min ago
      Until: Sat 2025-11-29 07:32:01 UTC; 45min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 0B (peak: 544.0K)
        CPU: 3ms
     CGroup: /system.slice/virtqemud.socket

Nov 29 07:32:01 compute-0 systemd[1]: Starting libvirt QEMU daemon socket...
Nov 29 07:32:01 compute-0 systemd[1]: Listening on libvirt QEMU daemon socket.

● virtsecretd-admin.socket - libvirt secret daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-admin.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-11-29 07:32:02 UTC; 45min ago
      Until: Sat 2025-11-29 07:32:02 UTC; 45min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 0B (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtsecretd-admin.socket

Nov 29 07:32:02 compute-0 systemd[1]: Starting libvirt secret daemon admin socket...
Nov 29 07:32:02 compute-0 systemd[1]: Listening on libvirt secret daemon admin socket.

● virtsecretd-ro.socket - libvirt secret daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-ro.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-11-29 07:32:02 UTC; 45min ago
      Until: Sat 2025-11-29 07:32:02 UTC; 45min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 0B (peak: 528.0K)
        CPU: 3ms
     CGroup: /system.slice/virtsecretd-ro.socket

Nov 29 07:32:02 compute-0 systemd[1]: Starting libvirt secret daemon read-only socket...
Nov 29 07:32:02 compute-0 systemd[1]: Listening on libvirt secret daemon read-only socket.

● virtsecretd.socket - libvirt secret daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-11-29 07:32:02 UTC; 45min ago
      Until: Sat 2025-11-29 07:32:02 UTC; 45min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtsecretd.socket

Nov 29 07:32:02 compute-0 systemd[1]: Starting libvirt secret daemon socket...
Nov 29 07:32:02 compute-0 systemd[1]: Listening on libvirt secret daemon socket.

○ virtstoraged-admin.socket - libvirt storage daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-admin-sock (Stream)

○ virtstoraged-ro.socket - libvirt storage daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock-ro (Stream)

○ virtstoraged.socket - libvirt storage daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock (Stream)

● swap.swap - /swap
     Loaded: loaded (/etc/fstab; generated)
     Active: active since Sat 2025-11-29 07:05:16 UTC; 1h 12min ago
      Until: Sat 2025-11-29 07:05:16 UTC; 1h 12min ago
       What: /swap
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● basic.target - Basic System
     Loaded: loaded (/usr/lib/systemd/system/basic.target; static)
     Active: active since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
       Docs: man:systemd.special(7)

Nov 29 06:29:44 localhost systemd[1]: Reached target Basic System.

○ blockdev@dev-disk-by\x2duuid-b277050f\x2d8ace\x2d464d\x2dabb6\x2d4c46d4c45253.target - Block Device Preparation for /dev/disk/by-uuid/b277050f-8ace-464d-abb6-4c46d4c45253
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ blockdev@dev-vda1.target - Block Device Preparation for /dev/vda1
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● ceph-14ff1f30-5059-58f1-9a23-69871bb275a1.target - Ceph cluster 14ff1f30-5059-58f1-9a23-69871bb275a1
     Loaded: loaded (/etc/systemd/system/ceph-14ff1f30-5059-58f1-9a23-69871bb275a1.target; enabled; preset: disabled)
     Active: active since Sat 2025-11-29 07:11:12 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:11:12 UTC; 1h 6min ago

Nov 29 07:11:12 compute-0 systemd[1]: Reached target Ceph cluster 14ff1f30-5059-58f1-9a23-69871bb275a1.

● ceph.target - All Ceph clusters and services
     Loaded: loaded (/etc/systemd/system/ceph.target; enabled; preset: disabled)
     Active: active since Sat 2025-11-29 07:11:12 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:11:12 UTC; 1h 6min ago

Nov 29 07:11:12 compute-0 systemd[1]: Reached target All Ceph clusters and services.

● cloud-config.target - Cloud-config availability
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.target; static)
     Active: active since Sat 2025-11-29 06:29:53 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:53 UTC; 1h 47min ago

Nov 29 06:29:53 np0005539576.novalocal systemd[1]: Reached target Cloud-config availability.

● cloud-init.target - Cloud-init target
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.target; enabled-runtime; preset: disabled)
     Active: active since Sat 2025-11-29 06:29:54 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:54 UTC; 1h 47min ago

Nov 29 06:29:54 np0005539576.novalocal systemd[1]: Reached target Cloud-init target.

○ cryptsetup-pre.target - Local Encrypted Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● cryptsetup.target - Local Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup.target; static)
     Active: active since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
       Docs: man:systemd.special(7)

● edpm_libvirt.target
     Loaded: loaded (/etc/systemd/system/edpm_libvirt.target; static)
     Active: active since Sat 2025-11-29 07:32:43 UTC; 44min ago
      Until: Sat 2025-11-29 07:32:43 UTC; 44min ago

Nov 29 07:32:43 compute-0 systemd[1]: Reached target edpm_libvirt.target.

○ emergency.target - Emergency Mode
     Loaded: loaded (/usr/lib/systemd/system/emergency.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ first-boot-complete.target - First Boot Complete
     Loaded: loaded (/usr/lib/systemd/system/first-boot-complete.target; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
       Docs: man:systemd.special(7)

Nov 29 06:29:43 localhost systemd[1]: First Boot Complete was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ getty-pre.target - Preparation for Logins
     Loaded: loaded (/usr/lib/systemd/system/getty-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

● getty.target - Login Prompts
     Loaded: loaded (/usr/lib/systemd/system/getty.target; static)
     Active: active since Sat 2025-11-29 06:29:53 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:53 UTC; 1h 47min ago
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

Nov 29 06:29:53 np0005539576.novalocal systemd[1]: Reached target Login Prompts.

○ graphical.target - Graphical Interface
     Loaded: loaded (/usr/lib/systemd/system/graphical.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ initrd-fs.target - Initrd File Systems
     Loaded: loaded (/usr/lib/systemd/system/initrd-fs.target; static)
     Active: inactive (dead) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
       Docs: man:systemd.special(7)

Nov 29 06:29:41 localhost systemd[1]: Reached target Initrd File Systems.

○ initrd-root-device.target - Initrd Root Device
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-device.target; static)
     Active: inactive (dead) since Sat 2025-11-29 06:29:41 UTC; 1h 47min ago
       Docs: man:systemd.special(7)

Nov 29 06:28:08 localhost systemd[1]: Reached target Initrd Root Device.
Nov 29 06:29:41 localhost systemd[1]: Stopped target Initrd Root Device.

○ initrd-root-fs.target - Initrd Root File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-fs.target; static)
     Active: inactive (dead) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
       Docs: man:systemd.special(7)

Nov 29 06:29:41 localhost systemd[1]: Reached target Initrd Root File System.

○ initrd-switch-root.target - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.target; static)
     Active: inactive (dead) since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago

Nov 29 06:29:41 localhost systemd[1]: Reached target Switch Root.

○ initrd-usr-fs.target - Initrd /usr File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-usr-fs.target; static)
     Active: inactive (dead) since Sat 2025-11-29 06:29:41 UTC; 1h 47min ago
       Docs: man:systemd.special(7)

Nov 29 06:29:41 localhost systemd[1]: Stopped target Initrd /usr File System.
Notice: journal has been rotated since unit was started, output may be incomplete.

○ initrd.target - Initrd Default Target
     Loaded: loaded (/usr/lib/systemd/system/initrd.target; static)
     Active: inactive (dead) since Sat 2025-11-29 06:29:41 UTC; 1h 47min ago
       Docs: man:systemd.special(7)

Nov 29 06:29:41 localhost systemd[1]: Reached target Initrd Default Target.
Nov 29 06:29:41 localhost systemd[1]: Stopped target Initrd Default Target.

● integritysetup.target - Local Integrity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/integritysetup.target; static)
     Active: active since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
       Docs: man:systemd.special(7)

● local-fs-pre.target - Preparation for Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs-pre.target; static)
     Active: active since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
       Docs: man:systemd.special(7)

Nov 29 06:29:43 localhost systemd[1]: Reached target Preparation for Local File Systems.

● local-fs.target - Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs.target; static)
     Active: active since Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:43 UTC; 1h 47min ago
       Docs: man:systemd.special(7)

Nov 29 06:29:43 localhost systemd[1]: Reached target Local File Systems.

● multi-user.target - Multi-User System
     Loaded: loaded (/usr/lib/systemd/system/multi-user.target; indirect; preset: disabled)
     Active: active since Sat 2025-11-29 06:29:53 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:53 UTC; 1h 47min ago
       Docs: man:systemd.special(7)

Nov 29 06:29:53 np0005539576.novalocal systemd[1]: Reached target Multi-User System.

● network-online.target - Network is Online
     Loaded: loaded (/usr/lib/systemd/system/network-online.target; static)
     Active: active since Sat 2025-11-29 06:29:53 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:53 UTC; 1h 47min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Nov 29 06:29:53 np0005539576.novalocal systemd[1]: Reached target Network is Online.

● network-pre.target - Preparation for Network
     Loaded: loaded (/usr/lib/systemd/system/network-pre.target; static)
     Active: active since Sat 2025-11-29 06:29:45 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:45 UTC; 1h 47min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Nov 29 06:29:45 np0005539576.novalocal systemd[1]: Reached target Preparation for Network.

● network.target - Network
     Loaded: loaded (/usr/lib/systemd/system/network.target; static)
     Active: active since Sat 2025-11-29 06:29:45 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:45 UTC; 1h 47min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Nov 29 06:29:45 np0005539576.novalocal systemd[1]: Reached target Network.

● nfs-client.target - NFS client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-client.target; enabled; preset: disabled)
     Active: active since Sat 2025-11-29 06:29:45 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:45 UTC; 1h 47min ago

Nov 29 06:29:45 np0005539576.novalocal systemd[1]: Reached target NFS client services.

○ nss-lookup.target - Host and Network Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-lookup.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● nss-user-lookup.target - User and Group Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-user-lookup.target; static)
     Active: active since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
       Docs: man:systemd.special(7)

Nov 29 06:29:44 localhost systemd[1]: Reached target User and Group Name Lookups.

● paths.target - Path Units
     Loaded: loaded (/usr/lib/systemd/system/paths.target; static)
     Active: active since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
       Docs: man:systemd.special(7)

○ remote-cryptsetup.target - Remote Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-cryptsetup.target; disabled; preset: enabled)
     Active: inactive (dead) since Sat 2025-11-29 06:29:41 UTC; 1h 47min ago
       Docs: man:systemd.special(7)

Nov 29 06:28:08 localhost systemd[1]: Reached target Remote Encrypted Volumes.
Nov 29 06:29:41 localhost systemd[1]: Stopped target Remote Encrypted Volumes.

● remote-fs-pre.target - Preparation for Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs-pre.target; static)
     Active: active since Sat 2025-11-29 06:29:45 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:45 UTC; 1h 47min ago
       Docs: man:systemd.special(7)

Nov 29 06:29:45 np0005539576.novalocal systemd[1]: Reached target Preparation for Remote File Systems.

● remote-fs.target - Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs.target; enabled; preset: enabled)
     Active: active since Sat 2025-11-29 06:29:45 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:45 UTC; 1h 47min ago
       Docs: man:systemd.special(7)

Nov 29 06:29:45 np0005539576.novalocal systemd[1]: Reached target Remote File Systems.

○ remote-veritysetup.target - Remote Verity Protected Volumes
    Unit syslog.target could not be found.
     Loaded: loaded (/usr/lib/systemd/system/remote-veritysetup.target; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ rescue.target - Rescue Mode
     Loaded: loaded (/usr/lib/systemd/system/rescue.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● rpc_pipefs.target
     Loaded: loaded (/usr/lib/systemd/system/rpc_pipefs.target; static)
     Active: active since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago

● rpcbind.target - RPC Port Mapper
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.target; static)
     Active: active since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
       Docs: man:systemd.special(7)

○ shutdown.target - System Shutdown
     Loaded: loaded (/usr/lib/systemd/system/shutdown.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● slices.target - Slice Units
     Loaded: loaded (/usr/lib/systemd/system/slices.target; static)
     Active: active since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
       Docs: man:systemd.special(7)

● sockets.target - Socket Units
     Loaded: loaded (/usr/lib/systemd/system/sockets.target; static)
     Active: active since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
       Docs: man:systemd.special(7)

Nov 29 06:29:44 localhost systemd[1]: Reached target Socket Units.

● sshd-keygen.target
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen.target; static)
     Active: active since Sat 2025-11-29 07:30:06 UTC; 47min ago
      Until: Sat 2025-11-29 07:30:06 UTC; 47min ago

Nov 29 07:30:06 compute-0 systemd[1]: Reached target sshd-keygen.target.

● swap.target - Swaps
     Loaded: loaded (/usr/lib/systemd/system/swap.target; static)
     Active: active since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
       Docs: man:systemd.special(7)

● sysinit.target - System Initialization
     Loaded: loaded (/usr/lib/systemd/system/sysinit.target; static)
     Active: active since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
       Docs: man:systemd.special(7)

Nov 29 06:29:44 localhost systemd[1]: Reached target System Initialization.

● time-set.target - System Time Set
     Loaded: loaded (/usr/lib/systemd/system/time-set.target; static)
     Active: active since Sat 2025-11-29 07:11:13 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:11:13 UTC; 1h 6min ago
       Docs: man:systemd.special(7)

Nov 29 07:11:13 compute-0 systemd[1]: Reached target System Time Set.

● time-sync.target - System Time Synchronized
     Loaded: loaded (/usr/lib/systemd/system/time-sync.target; static)
     Active: active since Sat 2025-11-29 07:11:13 UTC; 1h 6min ago
      Until: Sat 2025-11-29 07:11:13 UTC; 1h 6min ago
       Docs: man:systemd.special(7)

Nov 29 07:11:13 compute-0 systemd[1]: Reached target System Time Synchronized.

● timers.target - Timer Units
     Loaded: loaded (/usr/lib/systemd/system/timers.target; static)
     Active: active since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
       Docs: man:systemd.special(7)

Nov 29 06:29:44 localhost systemd[1]: Reached target Timer Units.

○ umount.target - Unmount All Filesystems
     Loaded: loaded (/usr/lib/systemd/system/umount.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ veritysetup-pre.target - Local Verity Protected Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/veritysetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● veritysetup.target - Local Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/veritysetup.target; static)
     Active: active since Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:42 UTC; 1h 47min ago
       Docs: man:systemd.special(7)

○ virt-guest-shutdown.target - Libvirt guests shutdown
     Loaded: loaded (/etc/systemd/system/virt-guest-shutdown.target; static)
     Active: inactive (dead)
       Docs: https://libvirt.org

● 23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8-719f575451f10305.timer - /usr/bin/podman healthcheck run 23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8
     Loaded: loaded (/run/systemd/transient/23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8-719f575451f10305.timer; transient)
  Transient: yes
     Active: active (waiting) since Sat 2025-11-29 07:24:13 UTC; 53min ago
      Until: Sat 2025-11-29 07:24:13 UTC; 53min ago
    Trigger: Sat 2025-11-29 08:17:25 UTC; 7s left
   Triggers: ● 23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8-719f575451f10305.service

Nov 29 07:24:13 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 23d05e03be4cb9084b6afbc3edf0d56047ca0f3f1aaf62172739e6d6ce3f7fa8.

● 53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f-6af3e4fb7442b0af.timer - /usr/bin/podman healthcheck run 53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f
     Loaded: loaded (/run/systemd/transient/53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f-6af3e4fb7442b0af.timer; transient)
  Transient: yes
     Active: active (waiting) since Sat 2025-11-29 07:35:57 UTC; 41min ago
      Until: Sat 2025-11-29 07:35:57 UTC; 41min ago
    Trigger: Sat 2025-11-29 08:17:25 UTC; 7s left
   Triggers: ● 53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f-6af3e4fb7442b0af.service

Nov 29 07:35:57 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 53b3e3edbc13651958250c6312d45e987c0dee5fbb3effb8392660dfc1eaa82f.

● 8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e-5e7e647fed5e19a2.timer - /usr/bin/podman healthcheck run 8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e
     Loaded: loaded (/run/systemd/transient/8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e-5e7e647fed5e19a2.timer; transient)
  Transient: yes
     Active: active (waiting) since Sat 2025-11-29 07:25:57 UTC; 51min ago
      Until: Sat 2025-11-29 07:25:57 UTC; 51min ago
    Trigger: Sat 2025-11-29 08:17:25 UTC; 7s left
   Triggers: ● 8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e-5e7e647fed5e19a2.service

Nov 29 07:25:57 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 8d4ccc3041d68cb3d47809db0bb1a7e168a735eaa6605edbf2567447905aa50e.

● dnf-makecache.timer - dnf makecache --timer
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.timer; enabled; preset: enabled)
     Active: active (waiting) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
    Trigger: Sat 2025-11-29 08:42:16 UTC; 24min left
   Triggers: ● dnf-makecache.service

Nov 29 06:29:44 localhost systemd[1]: Started dnf makecache --timer.

● logrotate.timer - Daily rotation of log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.timer; enabled; preset: enabled)
     Active: active (waiting) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
    Trigger: Sun 2025-11-30 00:00:00 UTC; 15h left
   Triggers: ● logrotate.service
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

Nov 29 06:29:44 localhost systemd[1]: Started Daily rotation of log files.

○ raid-check.timer - Weekly RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.timer; enabled; preset: enabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● raid-check.service

○ sysstat-collect.timer - Run system activity accounting tool every 10 minutes
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-collect.service

○ sysstat-summary.timer - Generate summary of yesterday's process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-summary.service

● systemd-tmpfiles-clean.timer - Daily Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.timer; static)
     Active: active (waiting) since Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
      Until: Sat 2025-11-29 06:29:44 UTC; 1h 47min ago
    Trigger: Sun 2025-11-30 06:43:23 UTC; 22h left
   Triggers: ● systemd-tmpfiles-clean.service
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)

Nov 29 06:29:44 localhost systemd[1]: Started Daily Cleanup of Temporary Directories.

● unbound-anchor.timer - daily update of the root trust anchor for DNSSEC
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.timer; enabled; preset: enabled)
     Active: active (waiting) since Sat 2025-11-29 07:05:59 UTC; 1h 11min ago
      Until: Sat 2025-11-29 07:05:59 UTC; 1h 11min ago
    Trigger: Sun 2025-11-30 00:00:00 UTC; 15h left
   Triggers: ● unbound-anchor.service
       Docs: man:unbound-anchor(8)

Nov 29 07:05:59 compute-0 systemd[1]: Started daily update of the root trust anchor for DNSSEC.
