● compute-0
    State: running
    Units: 476 loaded (incl. loaded aliases)
     Jobs: 0 queued
   Failed: 0 units
    Since: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
  systemd: 252-59.el9
   CGroup: /
           ├─307236 turbostat --debug sleep 10
           ├─307251 sleep 10
           ├─init.scope
           │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
           ├─machine.slice
           │ ├─libpod-130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193.scope
           │ │ └─container
           │ │   ├─236124 dumb-init --single-child -- kolla_start
           │ │   └─236127 /usr/sbin/multipathd -d
           │ ├─libpod-301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193.scope
           │ │ └─container
           │ │   ├─163736 dumb-init --single-child -- kolla_start
           │ │   ├─163757 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │ │   ├─163893 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │ │   ├─164036 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp4q14yauh/privsep.sock
           │ │   ├─262398 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpti_0kgk9/privsep.sock
           │ │   └─262581 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpokt26739/privsep.sock
           │ ├─libpod-5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998.scope
           │ │ └─container
           │ │   ├─153855 dumb-init --single-child -- kolla_start
           │ │   └─153864 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
           │ └─libpod-5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad.scope
           │   └─container
           │     ├─254902 dumb-init --single-child -- kolla_start
           │     ├─254904 /usr/bin/python3 /usr/bin/nova-compute
           │     ├─262068 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmp1no9jew6/privsep.sock
           │     ├─262759 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmpoqbx9wo3/privsep.sock
           │     └─262878 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmp8u10dppg/privsep.sock
           ├─system.slice
           │ ├─NetworkManager.service
           │ │ └─48987 /usr/sbin/NetworkManager --no-daemon
           │ ├─auditd.service
           │ │ ├─702 /sbin/auditd
           │ │ └─704 /usr/sbin/sedispatch
           │ ├─chronyd.service
           │ │ └─58549 /usr/sbin/chronyd -F 2
           │ ├─crond.service
           │ │ ├─  1009 /usr/sbin/crond -n
           │ │ └─162071 /usr/sbin/anacron -s
           │ ├─dbus-broker.service
           │ │ ├─756 /usr/bin/dbus-broker-launch --scope system --audit
           │ │ └─773 dbus-broker --log 4 --controller 9 --machine-id 1f988c78c563e12389ab342aced42dbb --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
           │ ├─edpm_multipathd.service
           │ │ └─236122 /usr/bin/conmon --api-version 1 -c 130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193 -u 130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193/userdata -p /run/containers/storage/overlay-containers/130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193/userdata/pidfile -n multipathd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193/userdata/oci-log --conmon-pidfile /run/multipathd.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193
           │ ├─edpm_nova_compute.service
           │ │ └─254900 /usr/bin/conmon --api-version 1 -c 5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad -u 5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad/userdata -p /run/containers/storage/overlay-containers/5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad
           │ ├─edpm_ovn_controller.service
           │ │ └─153849 /usr/bin/conmon --api-version 1 -c 5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998 -u 5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998/userdata -p /run/containers/storage/overlay-containers/5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998
           │ ├─edpm_ovn_metadata_agent.service
           │ │ └─163733 /usr/bin/conmon --api-version 1 -c 301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193 -u 301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193/userdata -p /run/containers/storage/overlay-containers/301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193
           │ ├─gssproxy.service
           │ │ └─870 /usr/sbin/gssproxy -D
           │ ├─irqbalance.service
           │ │ └─782 /usr/sbin/irqbalance
           │ ├─iscsid.service
           │ │ └─238852 /usr/sbin/iscsid -f
           │ ├─ovs-vswitchd.service
           │ │ └─47291 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
           │ ├─ovsdb-server.service
           │ │ └─47210 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
           │ ├─polkit.service
           │ │ └─43476 /usr/lib/polkit-1/polkitd --no-debug
           │ ├─rpcbind.service
           │ │ └─700 /usr/bin/rpcbind -w -f
           │ ├─rsyslog.service
           │ │ └─1005 /usr/sbin/rsyslogd -n
           │ ├─sshd.service
           │ │ └─190649 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
           │ ├─system-ceph\x2d95bc4eaa\x2d1a14\x2d59bf\x2dacf2\x2d4b3da055547d.slice
           │ │ ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@crash.compute-0.service
           │ │ │ ├─libpod-payload-3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b
           │ │ │ │ ├─83162 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
           │ │ │ │ └─83164 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
           │ │ │ └─runtime
           │ │ │   └─83160 /usr/bin/conmon --api-version 1 -c 3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b -u 3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b/userdata -p /run/containers/storage/overlay-containers/3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b
           │ │ ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mds.cephfs.compute-0.bydekr.service
           │ │ │ ├─libpod-payload-f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c
           │ │ │ │ ├─101612 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.bydekr -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─101614 /usr/bin/ceph-mds -n mds.cephfs.compute-0.bydekr -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─101610 /usr/bin/conmon --api-version 1 -c f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c -u f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c/userdata -p /run/containers/storage/overlay-containers/f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-mds-cephfs-compute-0-bydekr --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mds.cephfs.compute-0.bydekr.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c
           │ │ ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mgr.compute-0.ntxcvs.service
           │ │ │ ├─libpod-payload-e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093
           │ │ │ │ ├─75370 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.ntxcvs -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─75372 /usr/bin/ceph-mgr -n mgr.compute-0.ntxcvs -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─75368 /usr/bin/conmon --api-version 1 -c e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093 -u e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093/userdata -p /run/containers/storage/overlay-containers/e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-mgr-compute-0-ntxcvs --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mgr.compute-0.ntxcvs.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093
           │ │ ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mon.compute-0.service
           │ │ │ ├─libpod-payload-cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b
           │ │ │ │ ├─75079 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ │ └─75081 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─75077 /usr/bin/conmon --api-version 1 -c cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b -u cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b/userdata -p /run/containers/storage/overlay-containers/cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b
           │ │ ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.0.service
           │ │ │ ├─libpod-payload-13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222
           │ │ │ │ ├─88959 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─88961 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─88957 /usr/bin/conmon --api-version 1 -c 13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222 -u 13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222/userdata -p /run/containers/storage/overlay-containers/13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222
           │ │ ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.1.service
           │ │ │ ├─libpod-payload-2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db
           │ │ │ │ ├─89964 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─89966 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─89962 /usr/bin/conmon --api-version 1 -c 2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db -u 2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db/userdata -p /run/containers/storage/overlay-containers/2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db
           │ │ ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.2.service
           │ │ │ ├─libpod-payload-227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5
           │ │ │ │ ├─91052 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─91055 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─91050 /usr/bin/conmon --api-version 1 -c 227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5 -u 227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5/userdata -p /run/containers/storage/overlay-containers/227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5
           │ │ └─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@rgw.rgw.compute-0.ssuoka.service
           │ │   ├─libpod-payload-ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b
           │ │   │ ├─101149 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.ssuoka -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │   │ └─101151 /usr/bin/radosgw -n client.rgw.rgw.compute-0.ssuoka -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │   └─runtime
           │ │     └─101147 /usr/bin/conmon --api-version 1 -c ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b -u ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b/userdata -p /run/containers/storage/overlay-containers/ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-rgw-rgw-compute-0-ssuoka --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@rgw.rgw.compute-0.ssuoka.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b
           │ ├─system-getty.slice
           │ │ └─getty@tty1.service
           │ │   └─1010 /sbin/agetty -o "-p -- \\u" --noclear - linux
           │ ├─system-serial\x2dgetty.slice
           │ │ └─serial-getty@ttyS0.service
           │ │   └─1011 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
           │ ├─systemd-hostnamed.service
           │ │ └─304498 /usr/lib/systemd/systemd-hostnamed
           │ ├─systemd-journald.service
           │ │ └─679 /usr/lib/systemd/systemd-journald
           │ ├─systemd-logind.service
           │ │ └─787 /usr/lib/systemd/systemd-logind
           │ ├─systemd-machined.service
           │ │ └─216222 /usr/lib/systemd/systemd-machined
           │ ├─systemd-udevd.service
           │ │ └─udev
           │ │   └─730 /usr/lib/systemd/systemd-udevd
           │ ├─tuned.service
           │ │ └─113067 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
           │ ├─virtlogd.service
           │ │ └─215589 /usr/sbin/virtlogd
           │ ├─virtnodedevd.service
           │ │ └─255222 /usr/sbin/virtnodedevd --timeout 120
           │ ├─virtqemud.service
           │ │ └─254597 /usr/sbin/virtqemud --timeout 120
           │ └─virtsecretd.service
           │   └─262209 /usr/sbin/virtsecretd --timeout 120
           └─user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4520 /usr/bin/python3
             │ ├─session-52.scope
             │ │ ├─301119 "sshd-session: zuul [priv]"
             │ │ ├─301122 "sshd-session: zuul@notty"
             │ │ ├─301123 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─301147 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─307235 timeout 15s turbostat --debug sleep 10
             │ │ ├─307887 timeout 300s systemctl status --all
             │ │ └─307888 systemctl status --all
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─14561 /usr/bin/dbus-broker-launch --scope user
             │   │   └─14577 dbus-broker --log 4 --controller 9 --machine-id 1f988c78c563e12389ab342aced42dbb --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4310 /usr/lib/systemd/systemd --user
             │   │ └─4312 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-2aa5b999.scope
             │       └─14491 catatonit -P
             └─user-42477.slice
               ├─session-21.scope
               │ ├─76630 "sshd-session: ceph-admin [priv]"
               │ └─76652 "sshd-session: ceph-admin"
               ├─session-23.scope
               │ ├─76647 "sshd-session: ceph-admin [priv]"
               │ └─76653 "sshd-session: ceph-admin@notty"
               ├─session-24.scope
               │ ├─76704 "sshd-session: ceph-admin [priv]"
               │ └─76707 "sshd-session: ceph-admin@notty"
Unit boot.automount could not be found.
                ├─session-25.scope
               │ ├─76758 "sshd-session: ceph-admin [priv]"
               │ └─76761 "sshd-session: ceph-admin@notty"
               ├─session-26.scope
               │ ├─76812 "sshd-session: ceph-admin [priv]"
               │ └─76816 "sshd-session: ceph-admin@notty"
               ├─session-27.scope
               │ ├─76867 "sshd-session: ceph-admin [priv]"
               │ └─76870 "sshd-session: ceph-admin@notty"
               ├─session-28.scope
               │ ├─76921 "sshd-session: ceph-admin [priv]"
               │ └─76924 "sshd-session: ceph-admin@notty"
               ├─session-29.scope
               │ ├─76975 "sshd-session: ceph-admin [priv]"
               │ └─76978 "sshd-session: ceph-admin@notty"
               ├─session-30.scope
               │ ├─77029 "sshd-session: ceph-admin [priv]"
               │ └─77032 "sshd-session: ceph-admin@notty"
               ├─session-31.scope
               │ ├─77083 "sshd-session: ceph-admin [priv]"
               │ └─77086 "sshd-session: ceph-admin@notty"
               ├─session-32.scope
               │ ├─77110 "sshd-session: ceph-admin [priv]"
               │ └─77113 "sshd-session: ceph-admin@notty"
               ├─session-33.scope
               │ ├─77164 "sshd-session: ceph-admin [priv]"
               │ └─77167 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─76634 /usr/lib/systemd/systemd --user
                   └─76636 "(sd-pam)"

● proc-sys-fs-binfmt_misc.automount - Arbitrary Executable File Formats File System Automount Point
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.automount; static)
     Active: active (running) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
   Triggers: ● proc-sys-fs-binfmt_misc.mount
      Where: /proc/sys/fs/binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Dec 02 10:49:42 compute-0 systemd[1]: proc-sys-fs-binfmt_misc.automount: Got automount request for /proc/sys/fs/binfmt_misc, triggered by 78068 (sysctl)

● dev-cdrom.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:33 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:33 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ceph_vg0-ceph_lv0.device - /dev/ceph_vg0/ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:50 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:50 UTC; 51min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-ceph_vg1-ceph_lv1.device - /dev/ceph_vg1/ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:55 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:55 UTC; 51min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-ceph_vg2-ceph_lv2.device - /dev/ceph_vg2/ceph_lv2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:48:00 UTC; 51min ago
      Until: Tue 2025-12-02 10:48:00 UTC; 51min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2ddiskseq-1.device - /dev/disk/by-diskseq/1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2ddiskseq-11.device - /dev/disk/by-diskseq/11
    Follows: unit currently follows state of sys-devices-virtual-block-loop5.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:59 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:59 UTC; 51min ago
     Device: /sys/devices/virtual/block/loop5

● dev-disk-by\x2ddiskseq-12.device - /dev/disk/by-diskseq/12
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:48:00 UTC; 51min ago
      Until: Tue 2025-12-02 10:48:00 UTC; 51min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2ddiskseq-3.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:33 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:33 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2ddiskseq-5.device - /dev/disk/by-diskseq/5
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:49 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:49 UTC; 51min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2ddiskseq-6.device - /dev/disk/by-diskseq/6
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:50 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:50 UTC; 51min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2ddiskseq-8.device - /dev/disk/by-diskseq/8
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:55 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:55 UTC; 51min ago
     Device: /sys/devices/virtual/block/loop4

● dev-disk-by\x2ddiskseq-9.device - /dev/disk/by-diskseq/9
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:55 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:55 UTC; 51min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-ata\x2dQEMU_DVD\x2dROM_QM00001.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:33 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:33 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg0\x2dceph_lv0.device - /dev/disk/by-id/dm-name-ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:50 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:50 UTC; 51min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg1\x2dceph_lv1.device - /dev/disk/by-id/dm-name-ceph_vg1-ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:55 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:55 UTC; 51min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg2\x2dceph_lv2.device - /dev/disk/by-id/dm-name-ceph_vg2-ceph_lv2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:48:00 UTC; 51min ago
      Until: Tue 2025-12-02 10:48:00 UTC; 51min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2d0vXr9Xa2LsYMAkAJ2UFgGJpUFYaamZyFdUOe4ZGTbdqRYF6FhJGQiUA5BHLWJriG.device - /dev/disk/by-id/dm-uuid-LVM-0vXr9Xa2LsYMAkAJ2UFgGJpUFYaamZyFdUOe4ZGTbdqRYF6FhJGQiUA5BHLWJriG
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:55 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:55 UTC; 51min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2d1Cotl8YbofzzaT4w0zRXPokVoVsKgSv9J6JKrSAy2LN6PYJXDkoC52VoGtUEXDeD.device - /dev/disk/by-id/dm-uuid-LVM-1Cotl8YbofzzaT4w0zRXPokVoVsKgSv9J6JKrSAy2LN6PYJXDkoC52VoGtUEXDeD
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:50 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:50 UTC; 51min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2ddLjUY2tSwlo7hOiL1tdzofapcpQorUd6H8UriFdVU7PJk9Itvntc5uvJPmO147ag.device - /dev/disk/by-id/dm-uuid-LVM-dLjUY2tSwlo7hOiL1tdzofapcpQorUd6H8UriFdVU7PJk9Itvntc5uvJPmO147ag
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:48:00 UTC; 51min ago
      Until: Tue 2025-12-02 10:48:00 UTC; 51min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2d8wH47Y\x2dSJM1\x2d1OwQ\x2d2PmE\x2dsEzI\x2db7L8\x2do6EIte.device - /dev/disk/by-id/lvm-pv-uuid-8wH47Y-SJM1-1OwQ-2PmE-sEzI-b7L8-o6EIte
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:50 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:50 UTC; 51min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dc5qrQU\x2dA9f9\x2dnlka\x2dk6hA\x2daDQs\x2dJ4Ls\x2dDyb5do.device - /dev/disk/by-id/lvm-pv-uuid-c5qrQU-A9f9-nlka-k6hA-aDQs-J4Ls-Dyb5do
    Follows: unit currently follows state of sys-devices-virtual-block-loop5.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:48:00 UTC; 51min ago
      Until: Tue 2025-12-02 10:48:00 UTC; 51min ago
     Device: /sys/devices/virtual/block/loop5

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dZNmTPW\x2dxcU0\x2duzxF\x2dmir7\x2d0M93\x2deltV\x2dIVVyFm.device - /dev/disk/by-id/lvm-pv-uuid-ZNmTPW-xcU0-uzxF-mir7-0M93-eltV-IVVyFm
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:55 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:55 UTC; 51min ago
     Device: /sys/devices/virtual/block/loop4

● dev-disk-by\x2dlabel-config\x2d2.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:33 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:33 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpartuuid-5a328d8c\x2d01.device - /dev/disk/by-partuuid/5a328d8c-01
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:33 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:33 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:33 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:33 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0.device - /dev/disk/by-path/pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0.device - /dev/disk/by-path/virtio-pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/virtio-pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2duuid-2025\x2d12\x2d02\x2d10\x2d03\x2d18\x2d00.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:33 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:33 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2duuid-b277050f\x2d8ace\x2d464d\x2dabb6\x2d4c46d4c45253.device - /dev/disk/by-uuid/b277050f-8ace-464d-abb6-4c46d4c45253
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

Dec 02 10:03:30 localhost systemd[1]: Found device /dev/disk/by-uuid/b277050f-8ace-464d-abb6-4c46d4c45253.

● dev-dm\x2d0.device - /dev/dm-0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:50 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:50 UTC; 51min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-dm\x2d1.device - /dev/dm-1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:55 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:55 UTC; 51min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-dm\x2d2.device - /dev/dm-2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:48:00 UTC; 51min ago
      Until: Tue 2025-12-02 10:48:00 UTC; 51min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-loop3.device - /dev/loop3
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:49 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:49 UTC; 51min ago
     Device: /sys/devices/virtual/block/loop3

● dev-loop4.device - /dev/loop4
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:55 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:55 UTC; 51min ago
     Device: /sys/devices/virtual/block/loop4

● dev-loop5.device - /dev/loop5
    Follows: unit currently follows state of sys-devices-virtual-block-loop5.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:59 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:59 UTC; 51min ago
     Device: /sys/devices/virtual/block/loop5

● dev-mapper-ceph_vg0\x2dceph_lv0.device - /dev/mapper/ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:50 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:50 UTC; 51min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-mapper-ceph_vg1\x2dceph_lv1.device - /dev/mapper/ceph_vg1-ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:55 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:55 UTC; 51min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-mapper-ceph_vg2\x2dceph_lv2.device - /dev/mapper/ceph_vg2-ceph_lv2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:48:00 UTC; 51min ago
      Until: Tue 2025-12-02 10:48:00 UTC; 51min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-rfkill.device - /dev/rfkill
    Follows: unit currently follows state of sys-devices-virtual-misc-rfkill.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:34 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:34 UTC; 1h 35min ago
     Device: /sys/devices/virtual/misc/rfkill

● dev-sr0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:33 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:33 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ttyS0.device - /dev/ttyS0
    Follows: unit currently follows state of sys-devices-pnp0-00:00-tty-ttyS0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

Dec 02 10:03:32 localhost systemd[1]: Condition check resulted in /dev/ttyS0 being skipped.

● dev-ttyS1.device - /dev/ttyS1
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● dev-ttyS2.device - /dev/ttyS2
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS2.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● dev-ttyS3.device - /dev/ttyS3
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS3.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● dev-vda.device - /dev/vda
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-vda1.device - /dev/vda1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device - QEMU_DVD-ROM config-2
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:33 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:33 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● sys-devices-pci0000:00-0000:00:03.0-virtio1-net-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:33 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:33 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● sys-devices-pci0000:00-0000:00:07.0-virtio5-net-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:05:47 UTC; 1h 33min ago
      Until: Tue 2025-12-02 10:05:47 UTC; 1h 33min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-devices-platform-serial8250-tty-ttyS1.device - /sys/devices/platform/serial8250/tty/ttyS1
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● sys-devices-platform-serial8250-tty-ttyS2.device - /sys/devices/platform/serial8250/tty/ttyS2
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● sys-devices-platform-serial8250-tty-ttyS3.device - /sys/devices/platform/serial8250/tty/ttyS3
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● sys-devices-pnp0-00:00-tty-ttyS0.device - /sys/devices/pnp0/00:00/tty/ttyS0
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

● sys-devices-virtual-block-dm\x2d0.device - /sys/devices/virtual/block/dm-0
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:50 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:50 UTC; 51min ago
     Device: /sys/devices/virtual/block/dm-0

● sys-devices-virtual-block-dm\x2d1.device - /sys/devices/virtual/block/dm-1
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:55 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:55 UTC; 51min ago
     Device: /sys/devices/virtual/block/dm-1

● sys-devices-virtual-block-dm\x2d2.device - /sys/devices/virtual/block/dm-2
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:48:00 UTC; 51min ago
      Until: Tue 2025-12-02 10:48:00 UTC; 51min ago
     Device: /sys/devices/virtual/block/dm-2

● sys-devices-virtual-block-loop3.device - /sys/devices/virtual/block/loop3
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:49 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:49 UTC; 51min ago
     Device: /sys/devices/virtual/block/loop3

● sys-devices-virtual-block-loop4.device - /sys/devices/virtual/block/loop4
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:55 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:55 UTC; 51min ago
     Device: /sys/devices/virtual/block/loop4

● sys-devices-virtual-block-loop5.device - /sys/devices/virtual/block/loop5
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:47:59 UTC; 51min ago
      Until: Tue 2025-12-02 10:47:59 UTC; 51min ago
     Device: /sys/devices/virtual/block/loop5

● sys-devices-virtual-misc-rfkill.device - /sys/devices/virtual/misc/rfkill
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:34 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:34 UTC; 1h 35min ago
     Device: /sys/devices/virtual/misc/rfkill

● sys-devices-virtual-net-br\x2dex.device - /sys/devices/virtual/net/br-ex
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:44:35 UTC; 54min ago
      Until: Tue 2025-12-02 10:44:35 UTC; 54min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-devices-virtual-net-br\x2dint.device - /sys/devices/virtual/net/br-int
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 11:00:09 UTC; 39min ago
      Until: Tue 2025-12-02 11:00:09 UTC; 39min ago
     Device: /sys/devices/virtual/net/br-int

● sys-devices-virtual-net-genev_sys_6081.device - /sys/devices/virtual/net/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 11:00:09 UTC; 39min ago
      Until: Tue 2025-12-02 11:00:09 UTC; 39min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-devices-virtual-net-ovs\x2dsystem.device - /sys/devices/virtual/net/ovs-system
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:44:35 UTC; 54min ago
      Until: Tue 2025-12-02 10:44:35 UTC; 54min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-devices-virtual-net-vlan20.device - /sys/devices/virtual/net/vlan20
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:44:35 UTC; 54min ago
      Until: Tue 2025-12-02 10:44:35 UTC; 54min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-devices-virtual-net-vlan21.device - /sys/devices/virtual/net/vlan21
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:44:35 UTC; 54min ago
      Until: Tue 2025-12-02 10:44:35 UTC; 54min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-devices-virtual-net-vlan22.device - /sys/devices/virtual/net/vlan22
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:44:35 UTC; 54min ago
      Until: Tue 2025-12-02 10:44:35 UTC; 54min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-devices-virtual-net-vlan23.device - /sys/devices/virtual/net/vlan23
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:44:35 UTC; 54min ago
      Until: Tue 2025-12-02 10:44:35 UTC; 54min ago
     Device: /sys/devices/virtual/net/vlan23

● sys-module-configfs.device - /sys/module/configfs
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
Unit boot.mount could not be found.
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
     Device: /sys/module/configfs

● sys-module-fuse.device - /sys/module/fuse
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
     Device: /sys/module/fuse

● sys-subsystem-net-devices-br\x2dex.device - /sys/subsystem/net/devices/br-ex
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:44:35 UTC; 54min ago
      Until: Tue 2025-12-02 10:44:35 UTC; 54min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-subsystem-net-devices-br\x2dint.device - /sys/subsystem/net/devices/br-int
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 11:00:09 UTC; 39min ago
      Until: Tue 2025-12-02 11:00:09 UTC; 39min ago
     Device: /sys/devices/virtual/net/br-int

● sys-subsystem-net-devices-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:03:33 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:33 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-subsystem-net-devices-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:05:47 UTC; 1h 33min ago
      Until: Tue 2025-12-02 10:05:47 UTC; 1h 33min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-subsystem-net-devices-genev_sys_6081.device - /sys/subsystem/net/devices/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 11:00:09 UTC; 39min ago
      Until: Tue 2025-12-02 11:00:09 UTC; 39min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-subsystem-net-devices-ovs\x2dsystem.device - /sys/subsystem/net/devices/ovs-system
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:44:35 UTC; 54min ago
      Until: Tue 2025-12-02 10:44:35 UTC; 54min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-subsystem-net-devices-vlan20.device - /sys/subsystem/net/devices/vlan20
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:44:35 UTC; 54min ago
      Until: Tue 2025-12-02 10:44:35 UTC; 54min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-subsystem-net-devices-vlan21.device - /sys/subsystem/net/devices/vlan21
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:44:35 UTC; 54min ago
      Until: Tue 2025-12-02 10:44:35 UTC; 54min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-subsystem-net-devices-vlan22.device - /sys/subsystem/net/devices/vlan22
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:44:35 UTC; 54min ago
      Until: Tue 2025-12-02 10:44:35 UTC; 54min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-subsystem-net-devices-vlan23.device - /sys/subsystem/net/devices/vlan23
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-02 10:44:35 UTC; 54min ago
      Until: Tue 2025-12-02 10:44:35 UTC; 54min ago
     Device: /sys/devices/virtual/net/vlan23

● -.mount - Root Mount
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted)
      Where: /
       What: /dev/vda1
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages.mount - Huge Pages File System
     Loaded: loaded (/usr/lib/systemd/system/dev-hugepages.mount; static)
     Active: active (mounted) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Where: /dev/hugepages
       What: hugetlbfs
       Docs: https://docs.kernel.org/admin-guide/mm/hugetlbpage.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 52.0K (peak: 556.0K)
        CPU: 8ms
     CGroup: /dev-hugepages.mount

● dev-hugepages1G.mount - /dev/hugepages1G
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Tue 2025-12-02 10:46:49 UTC; 52min ago
Unit home.mount could not be found.
      Until: Tue 2025-12-02 10:46:49 UTC; 52min ago
      Where: /dev/hugepages1G
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages2M.mount - /dev/hugepages2M
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Tue 2025-12-02 10:46:50 UTC; 52min ago
      Until: Tue 2025-12-02 10:46:50 UTC; 52min ago
      Where: /dev/hugepages2M
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-mqueue.mount - POSIX Message Queue File System
     Loaded: loaded (/usr/lib/systemd/system/dev-mqueue.mount; static)
     Active: active (mounted) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Where: /dev/mqueue
       What: mqueue
       Docs: man:mq_overview(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 4.0K (peak: 540.0K)
        CPU: 4ms
     CGroup: /dev-mqueue.mount

○ proc-fs-nfsd.mount - NFSD configuration filesystem
     Loaded: loaded (/usr/lib/systemd/system/proc-fs-nfsd.mount; static)
     Active: inactive (dead)
      Where: /proc/fs/nfsd
       What: nfsd

● proc-sys-fs-binfmt_misc.mount - Arbitrary Executable File Formats File System
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.mount; disabled; preset: disabled)
     Active: active (mounted) since Tue 2025-12-02 10:49:42 UTC; 49min ago
      Until: Tue 2025-12-02 10:49:42 UTC; 49min ago
TriggeredBy: ● proc-sys-fs-binfmt_misc.automount
      Where: /proc/sys/fs/binfmt_misc
       What: binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 8.0K (peak: 540.0K)
        CPU: 6ms
     CGroup: /proc-sys-fs-binfmt_misc.mount

Dec 02 10:49:42 compute-0 systemd[1]: Mounting Arbitrary Executable File Formats File System...
Dec 02 10:49:42 compute-0 systemd[1]: Mounted Arbitrary Executable File Formats File System.

● run-credentials-systemd\x2dsysctl.service.mount - /run/credentials/systemd-sysctl.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 10:43:25 UTC; 55min ago
      Until: Tue 2025-12-02 10:43:25 UTC; 55min ago
      Where: /run/credentials/systemd-sysctl.service
       What: none

● run-credentials-systemd\x2dsysusers.service.mount - /run/credentials/systemd-sysusers.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Where: /run/credentials/systemd-sysusers.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup.service.mount - /run/credentials/systemd-tmpfiles-setup.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Where: /run/credentials/systemd-tmpfiles-setup.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup\x2ddev.service.mount - /run/credentials/systemd-tmpfiles-setup-dev.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Where: /run/credentials/systemd-tmpfiles-setup-dev.service
       What: none

● run-netns.mount - /run/netns
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 10:46:13 UTC; 53min ago
      Until: Tue 2025-12-02 10:46:13 UTC; 53min ago
      Where: /run/netns
       What: tmpfs

● run-user-1000.mount - /run/user/1000
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 10:04:12 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:04:12 UTC; 1h 35min ago
Unit sysroot.mount could not be found.
      Where: /run/user/1000
       What: tmpfs

● run-user-42477.mount - /run/user/42477
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 10:49:32 UTC; 49min ago
      Until: Tue 2025-12-02 10:49:32 UTC; 49min ago
      Where: /run/user/42477
       What: tmpfs

● sys-fs-fuse-connections.mount - FUSE Control File System
     Loaded: loaded (/usr/lib/systemd/system/sys-fs-fuse-connections.mount; static)
     Active: active (mounted) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Where: /sys/fs/fuse/connections
       What: fusectl
       Docs: https://docs.kernel.org/filesystems/fuse.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 4.0K (peak: 540.0K)
        CPU: 3ms
     CGroup: /sys-fs-fuse-connections.mount

Dec 02 10:03:32 localhost systemd[1]: Mounting FUSE Control File System...
Dec 02 10:03:32 localhost systemd[1]: Mounted FUSE Control File System.

● sys-kernel-config.mount - Kernel Configuration File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Where: /sys/kernel/config
       What: configfs
       Docs: https://docs.kernel.org/filesystems/configfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● sys-kernel-debug-tracing.mount - /sys/kernel/debug/tracing
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 11:38:32 UTC; 41s ago
      Until: Tue 2025-12-02 11:38:32 UTC; 41s ago
      Where: /sys/kernel/debug/tracing
       What: tracefs

● sys-kernel-debug.mount - Kernel Debug File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-debug.mount; static)
     Active: active (mounted) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Where: /sys/kernel/debug
       What: debugfs
       Docs: https://docs.kernel.org/filesystems/debugfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 4.0K (peak: 540.0K)
        CPU: 3ms
     CGroup: /sys-kernel-debug.mount

● sys-kernel-tracing.mount - Kernel Trace File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-tracing.mount; static)
     Active: active (mounted) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Where: /sys/kernel/tracing
       What: tracefs
       Docs: https://docs.kernel.org/trace/ftrace.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 4.0K (peak: 540.0K)
        CPU: 4ms
     CGroup: /sys-kernel-tracing.mount

○ tmp.mount - Temporary Directory /tmp
     Loaded: loaded (/usr/lib/systemd/system/tmp.mount; disabled; preset: disabled)
     Active: inactive (dead)
      Where: /tmp
       What: tmpfs
       Docs: https://systemd.io/TEMPORARY_DIRECTORIES
             man:file-hierarchy(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● var-lib-containers-storage-overlay-061e98d451dac25599ee5792e210f3d07b02fa3c960c209c5b4596ee9b5b250d-merged.mount - /var/lib/containers/storage/overlay/061e98d451dac25599ee5792e210f3d07b02fa3c960c209c5b4596ee9b5b250d/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 10:49:54 UTC; 49min ago
      Until: Tue 2025-12-02 10:49:54 UTC; 49min ago
      Where: /var/lib/containers/storage/overlay/061e98d451dac25599ee5792e210f3d07b02fa3c960c209c5b4596ee9b5b250d/merged
       What: overlay

● var-lib-containers-storage-overlay-1cc466066dc353ec43e3bad50d277240780b03b6e2dad9a5065a292db669592d-merged.mount - /var/lib/containers/storage/overlay/1cc466066dc353ec43e3bad50d277240780b03b6e2dad9a5065a292db669592d/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 10:50:28 UTC; 48min ago
      Until: Tue 2025-12-02 10:50:28 UTC; 48min ago
      Where: /var/lib/containers/storage/overlay/1cc466066dc353ec43e3bad50d277240780b03b6e2dad9a5065a292db669592d/merged
       What: overlay

● var-lib-containers-storage-overlay-1ee4d307ff59af50425527f7f4b0b792737ab73ee7d0267a7648552c7a3177b9-merged.mount - /var/lib/containers/storage/overlay/1ee4d307ff59af50425527f7f4b0b792737ab73ee7d0267a7648552c7a3177b9/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 10:50:23 UTC; 48min ago
      Until: Tue 2025-12-02 10:50:23 UTC; 48min ago
      Where: /var/lib/containers/storage/overlay/1ee4d307ff59af50425527f7f4b0b792737ab73ee7d0267a7648552c7a3177b9/merged
       What: overlay

● var-lib-containers-storage-overlay-31b9657d89fcfd28213277198afea8a63e49510566fd20ad37c0b0e7e17bf716-merged.mount - /var/lib/containers/storage/overlay/31b9657d89fcfd28213277198afea8a63e49510566fd20ad37c0b0e7e17bf716/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 10:51:48 UTC; 47min ago
      Until: Tue 2025-12-02 10:51:48 UTC; 47min ago
      Where: /var/lib/containers/storage/overlay/31b9657d89fcfd28213277198afea8a63e49510566fd20ad37c0b0e7e17bf716/merged
       What: overlay

● var-lib-containers-storage-overlay-4fb26ad42649584d932fbce8ec5889bfb52368abb708e3f7b7feb373072c4fc1-merged.mount - /var/lib/containers/storage/overlay/4fb26ad42649584d932fbce8ec5889bfb52368abb708e3f7b7feb373072c4fc1/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 11:07:36 UTC; 31min ago
      Until: Tue 2025-12-02 11:07:36 UTC; 31min ago
      Where: /var/lib/containers/storage/overlay/4fb26ad42649584d932fbce8ec5889bfb52368abb708e3f7b7feb373072c4fc1/merged
       What: overlay

● var-lib-containers-storage-overlay-79ca6d6ef7c22e8d30558ba37567215a2a297d0db6498b67805df7088a24006f-merged.mount - /var/lib/containers/storage/overlay/79ca6d6ef7c22e8d30558ba37567215a2a297d0db6498b67805df7088a24006f/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 10:50:34 UTC; 48min ago
      Until: Tue 2025-12-02 10:50:34 UTC; 48min ago
      Where: /var/lib/containers/storage/overlay/79ca6d6ef7c22e8d30558ba37567215a2a297d0db6498b67805df7088a24006f/merged
       What: overlay

● var-lib-containers-storage-overlay-86b1726cb1d10deee462ab1a56f93e2eb0b73920ca53ea4fe2c8c15b9ddeb76c-merged.mount - /var/lib/containers/storage/overlay/86b1726cb1d10deee462ab1a56f93e2eb0b73920ca53ea4fe2c8c15b9ddeb76c/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 11:00:08 UTC; 39min ago
      Until: Tue 2025-12-02 11:00:08 UTC; 39min ago
      Where: /var/lib/containers/storage/overlay/86b1726cb1d10deee462ab1a56f93e2eb0b73920ca53ea4fe2c8c15b9ddeb76c/merged
       What: overlay

● var-lib-containers-storage-overlay-b75b80620e97dc1d3fadb000494dd900f76bef2bb2173902422153c10d2f627c-merged.mount - /var/lib/containers/storage/overlay/b75b80620e97dc1d3fadb000494dd900f76bef2bb2173902422153c10d2f627c/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 11:01:17 UTC; 37min ago
      Until: Tue 2025-12-02 11:01:17 UTC; 37min ago
      Where: /var/lib/containers/storage/overlay/b75b80620e97dc1d3fadb000494dd900f76bef2bb2173902422153c10d2f627c/merged
       What: overlay

● var-lib-containers-storage-overlay-b9bb8973f1dd8a5b5993a5ce1e212a1446bde079af117b95eabd62e864b7b797-merged.mount - /var/lib/containers/storage/overlay/b9bb8973f1dd8a5b5993a5ce1e212a1446bde079af117b95eabd62e864b7b797/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 10:51:52 UTC; 47min ago
      Until: Tue 2025-12-02 10:51:52 UTC; 47min ago
      Where: /var/lib/containers/storage/overlay/b9bb8973f1dd8a5b5993a5ce1e212a1446bde079af117b95eabd62e864b7b797/merged
       What: overlay

● var-lib-containers-storage-overlay-e858e10f33e19e7fcdf7f29f1f633b3ebf4b082c09b6e65db48d426cac3af8ff-merged.mount - /var/lib/containers/storage/overlay/e858e10f33e19e7fcdf7f29f1f633b3ebf4b082c09b6e65db48d426cac3af8ff/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 10:48:39 UTC; 50min ago
      Until: Tue 2025-12-02 10:48:39 UTC; 50min ago
      Where: /var/lib/containers/storage/overlay/e858e10f33e19e7fcdf7f29f1f633b3ebf4b082c09b6e65db48d426cac3af8ff/merged
       What: overlay

● var-lib-containers-storage-overlay-eccb66f16a26faf4a5e1787ba91edb6752a8a5ed028e8bbe7b08e51fb99492b6-merged.mount - /var/lib/containers/storage/overlay/eccb66f16a26faf4a5e1787ba91edb6752a8a5ed028e8bbe7b08e51fb99492b6/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 10:48:41 UTC; 50min ago
      Until: Tue 2025-12-02 10:48:41 UTC; 50min ago
      Where: /var/lib/containers/storage/overlay/eccb66f16a26faf4a5e1787ba91edb6752a8a5ed028e8bbe7b08e51fb99492b6/merged
       What: overlay

● var-lib-containers-storage-overlay-f10da5fa7620e3f9c9d84b6427da33e91b9d0c0662d4a06a6e927aec3f6ee065-merged.mount - /var/lib/containers/storage/overlay/f10da5fa7620e3f9c9d84b6427da33e91b9d0c0662d4a06a6e927aec3f6ee065/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 11:09:25 UTC; 29min ago
      Until: Tue 2025-12-02 11:09:25 UTC; 29min ago
      Where: /var/lib/containers/storage/overlay/f10da5fa7620e3f9c9d84b6427da33e91b9d0c0662d4a06a6e927aec3f6ee065/merged
       What: overlay

● var-lib-containers-storage-overlay.mount - /var/lib/containers/storage/overlay
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 10:48:39 UTC; 50min ago
      Until: Tue 2025-12-02 10:48:39 UTC; 50min ago
      Where: /var/lib/containers/storage/overlay
       What: /dev/vda1

● var-lib-containers-storage-overlay\x2dcontainers-130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 11:07:36 UTC; 31min ago
      Until: Tue 2025-12-02 11:07:36 UTC; 31min ago
      Where: /var/lib/containers/storage/overlay-containers/130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 11:01:17 UTC; 37min ago
      Until: Tue 2025-12-02 11:01:17 UTC; 37min ago
      Where: /var/lib/containers/storage/overlay-containers/301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 11:00:08 UTC; 39min ago
      Until: Tue 2025-12-02 11:00:08 UTC; 39min ago
      Where: /var/lib/containers/storage/overlay-containers/5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-02 11:09:25 UTC; 29min ago
      Until: Tue 2025-12-02 11:09:25 UTC; 29min ago
      Where: /var/lib/containers/storage/overlay-containers/5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad/userdata/shm
       What: shm

○ var-lib-machines.mount - Virtual Machine and Container Storage (Compatibility)
     Loaded: loaded (/usr/lib/systemd/system/var-lib-machines.mount; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-02 11:05:26 UTC; 33min ago
      Where: /var/lib/machines
       What: /var/lib/machines.raw

Dec 02 11:05:26 compute-0 systemd[1]: Virtual Machine and Container Storage (Compatibility) was skipped because of an unmet condition check (ConditionPathExists=/var/lib/machines.raw).

● var-lib-nfs-rpc_pipefs.mount - RPC Pipe File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Where: /var/lib/nfs/rpc_pipefs
       What: rpc_pipefs

● systemd-ask-password-console.path - Dispatch Password Requests to Console Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.path; static)
     Active: active (waiting) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
   Triggers: ● systemd-ask-password-console.service
       Docs: man:systemd-ask-password-console.path(8)

● systemd-ask-password-wall.path - Forward Password Requests to Wall Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.path; static)
     Active: active (waiting) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
   Triggers: ● systemd-ask-password-wall.service
       Docs: man:systemd-ask-password-wall.path(8)

● init.scope - System and Service Manager
     Loaded: loaded
  Transient: yes
     Active: active (running) since Tue 2025-12-02 10:03:29 UTC; 1h 35min ago
       Docs: man:systemd(1)
         IO: 3.3M read, 0B written
      Tasks: 1 (limit: 48628)
     Memory: 50.2M (peak: 68.6M)
        CPU: 1min 20.516s
     CGroup: /init.scope
             └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31

Dec 02 11:38:16 compute-0 systemd[1]: libpod-conmon-cb488b55b1b61f85d1a62219d00cc34dc0b80dea28af3b9bbdc72d51097e7239.scope: Deactivated successfully.
Dec 02 11:38:16 compute-0 systemd[1]: Started libpod-conmon-c00ce674ac8e3776d6d6b71f1a07e4148b4ecc2421b1cc3c193f03c4617a4dc2.scope.
Dec 02 11:38:16 compute-0 systemd[1]: Started libcrun container.
Dec 02 11:38:18 compute-0 systemd[1]: libpod-c00ce674ac8e3776d6d6b71f1a07e4148b4ecc2421b1cc3c193f03c4617a4dc2.scope: Deactivated successfully.
Dec 02 11:38:18 compute-0 systemd[1]: libpod-c00ce674ac8e3776d6d6b71f1a07e4148b4ecc2421b1cc3c193f03c4617a4dc2.scope: Consumed 1.188s CPU time.
Dec 02 11:38:18 compute-0 systemd[1]: var-lib-containers-storage-overlay-a1778cd07b89f3efdacb6bebb5be0e5156ee3680241594b00541e30da33de4c2-merged.mount: Deactivated successfully.
Dec 02 11:38:18 compute-0 systemd[1]: libpod-conmon-c00ce674ac8e3776d6d6b71f1a07e4148b4ecc2421b1cc3c193f03c4617a4dc2.scope: Deactivated successfully.
Dec 02 11:38:22 compute-0 systemd[1]: Started Session 52 of User zuul.
Dec 02 11:38:55 compute-0 systemd[1]: Starting Hostname Service...
Dec 02 11:38:55 compute-0 systemd[1]: Started Hostname Service.

● libpod-130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-02 11:07:36 UTC; 31min ago
         IO: 96.0K read, 4.0K written
      Tasks: 8 (limit: 4096)
     Memory: 19.1M (peak: 21.0M)
        CPU: 1.546s
     CGroup: /machine.slice/libpod-130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193.scope
             └─container
               ├─236124 dumb-init --single-child -- kolla_start
               └─236127 /usr/sbin/multipathd -d

Dec 02 11:07:36 compute-0 systemd[1]: Started libcrun container.
Dec 02 11:07:36 compute-0 sudo[236128]:     root : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_set_configs
Dec 02 11:07:36 compute-0 sudo[236128]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Dec 02 11:07:36 compute-0 sudo[236128]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=0)
Dec 02 11:07:36 compute-0 sudo[236128]: pam_unix(sudo:session): session closed for user root
Dec 02 11:07:36 compute-0 sudo[236145]:     root : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_copy_cacerts
Dec 02 11:07:36 compute-0 sudo[236145]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Dec 02 11:07:36 compute-0 sudo[236145]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=0)
Dec 02 11:07:36 compute-0 sudo[236145]: pam_unix(sudo:session): session closed for user root

● libpod-301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193.scope.d
             └─dep.conf
     Active: active (running) since Tue 2025-12-02 11:01:17 UTC; 37min ago
         IO: 11.0M read, 15.1M written
      Tasks: 11 (limit: 4096)
     Memory: 433.7M (peak: 477.1M)
        CPU: 54.562s
     CGroup: /machine.slice/libpod-301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193.scope
             └─container
               ├─163736 dumb-init --single-child -- kolla_start
               ├─163757 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─163893 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─164036 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp4q14yauh/privsep.sock
               ├─262398 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpti_0kgk9/privsep.sock
               └─262581 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpokt26739/privsep.sock

Dec 02 11:35:41 compute-0 podman[297501]: 2025-12-02 11:35:41.403600117 +0000 UTC m=+0.071507511 container died 7cc664f642f0295030b3d599080d4d09126932d21480e569081457a298876213 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-28b69a92-5b45-421b-9985-afeebc6820aa, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, io.buildah.version=1.41.3, tcib_build_tag=fa2bb8efef6782c26ea7f1675eeb36dd, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251125, org.label-schema.license=GPLv2)
Dec 02 11:35:41 compute-0 podman[297501]: 2025-12-02 11:35:41.467785387 +0000 UTC m=+0.135692761 container cleanup 7cc664f642f0295030b3d599080d4d09126932d21480e569081457a298876213 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-28b69a92-5b45-421b-9985-afeebc6820aa, org.label-schema.build-date=20251125, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, org.label-schema.vendor=CentOS, tcib_build_tag=fa2bb8efef6782c26ea7f1675eeb36dd, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0)
Dec 02 11:35:41 compute-0 podman[297532]: 2025-12-02 11:35:41.564791184 +0000 UTC m=+0.060444162 container remove 7cc664f642f0295030b3d599080d4d09126932d21480e569081457a298876213 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-28b69a92-5b45-421b-9985-afeebc6820aa, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=fa2bb8efef6782c26ea7f1675eeb36dd, tcib_managed=true, org.label-schema.build-date=20251125, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Dec 02 11:36:00 compute-0 podman[298741]: 2025-12-02 11:36:00.206578912 +0000 UTC m=+0.058072617 container create 83ce492a8a5ae5af28223973e0a23ce20cb34ab4b8346c01ad14b0b9d4823ece (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-28b69a92-5b45-421b-9985-afeebc6820aa, io.buildah.version=1.41.3, tcib_build_tag=fa2bb8efef6782c26ea7f1675eeb36dd, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251125, org.label-schema.schema-version=1.0)
Dec 02 11:36:00 compute-0 podman[298741]: 2025-12-02 11:36:00.178055832 +0000 UTC m=+0.029549567 image pull 014dc726c85414b29f2dde7b5d875685d08784761c0f0ffa8630d1583a877bf9 quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified
Dec 02 11:36:00 compute-0 podman[298741]: 2025-12-02 11:36:00.304285888 +0000 UTC m=+0.155779613 container init 83ce492a8a5ae5af28223973e0a23ce20cb34ab4b8346c01ad14b0b9d4823ece (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-28b69a92-5b45-421b-9985-afeebc6820aa, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, io.buildah.version=1.41.3, tcib_build_tag=fa2bb8efef6782c26ea7f1675eeb36dd, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251125)
Dec 02 11:36:00 compute-0 podman[298741]: 2025-12-02 11:36:00.31180129 +0000 UTC m=+0.163295005 container start 83ce492a8a5ae5af28223973e0a23ce20cb34ab4b8346c01ad14b0b9d4823ece (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-28b69a92-5b45-421b-9985-afeebc6820aa, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=fa2bb8efef6782c26ea7f1675eeb36dd, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251125)
Dec 02 11:36:39 compute-0 podman[298949]: 2025-12-02 11:36:39.039693363 +0000 UTC m=+0.066706941 container died 83ce492a8a5ae5af28223973e0a23ce20cb34ab4b8346c01ad14b0b9d4823ece (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-28b69a92-5b45-421b-9985-afeebc6820aa, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251125, org.label-schema.schema-version=1.0, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=fa2bb8efef6782c26ea7f1675eeb36dd)
Dec 02 11:36:39 compute-0 podman[298949]: 2025-12-02 11:36:39.088835298 +0000 UTC m=+0.115848866 container cleanup 83ce492a8a5ae5af28223973e0a23ce20cb34ab4b8346c01ad14b0b9d4823ece (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-28b69a92-5b45-421b-9985-afeebc6820aa, org.label-schema.vendor=CentOS, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.build-date=20251125, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=fa2bb8efef6782c26ea7f1675eeb36dd, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0)
Dec 02 11:36:39 compute-0 podman[299004]: 2025-12-02 11:36:39.166351448 +0000 UTC m=+0.050700968 container remove 83ce492a8a5ae5af28223973e0a23ce20cb34ab4b8346c01ad14b0b9d4823ece (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-28b69a92-5b45-421b-9985-afeebc6820aa, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, org.label-schema.schema-version=1.0, tcib_build_tag=fa2bb8efef6782c26ea7f1675eeb36dd, org.label-schema.build-date=20251125, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)

● libpod-5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998.scope.d
             └─dep.conf
     Active: active (running) since Tue 2025-12-02 11:00:09 UTC; 39min ago
         IO: 7.7M read, 4.0K written
      Tasks: 6 (limit: 4096)
     Memory: 19.9M (peak: 24.6M)
        CPU: 8.939s
     CGroup: /machine.slice/libpod-5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998.scope
             └─container
               ├─153855 dumb-init --single-child -- kolla_start
               └─153864 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt

Dec 02 11:00:09 compute-0 systemd[1]: Started libcrun container.

● libpod-5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-02 11:09:25 UTC; 29min ago
         IO: 41.0M read, 41.9M written
      Tasks: 29 (limit: 4096)
     Memory: 440.3M (peak: 516.7M)
        CPU: 2min 34.607s
     CGroup: /machine.slice/libpod-5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad.scope
             └─container
               ├─254902 dumb-init --single-child -- kolla_start
               ├─254904 /usr/bin/python3 /usr/bin/nova-compute
               ├─262068 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmp1no9jew6/privsep.sock
               ├─262759 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmpoqbx9wo3/privsep.sock
               └─262878 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmp8u10dppg/privsep.sock

Dec 02 11:09:25 compute-0 systemd[1]: Started libcrun container.
Dec 02 11:16:41 compute-0 systemd-coredump[262900]: Process 262880 (qemu-img) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 262890:
                                                    #0  0x00007f778ae1803c __pthread_kill_implementation (libc.so.6 + 0x8d03c)
                                                    #1  0x00007f778adcab86 raise (libc.so.6 + 0x3fb86)
                                                    #2  0x00007f778adb4873 abort (libc.so.6 + 0x29873)
                                                    #3  0x0000557cd48e85df ___interceptor_pthread_create (qemu-img + 0x4f5df)
                                                    #4  0x00007f7783928ff4 _ZN6Thread10try_createEm (libceph-common.so.2 + 0x258ff4)
                                                    #5  0x00007f778392b6ae _ZN6Thread6createEPKcm (libceph-common.so.2 + 0x25b6ae)
                                                    #6  0x00007f7788ef826b _ZNSt8_Rb_treeISt4pairINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESt10type_indexES0_IKS8_N4ceph12immobile_anyILm576EEEESt10_Select1stISD_ENSA_6common11CephContext19associated_objs_cmpESaISD_EE22_M_emplace_hint_uniqueIJRKSt21piecewise_construct_tSt5tupleIJRSt17basic_string_viewIcS4_ERS7_EESP_IJRKSt15in_place_type_tIN6librbd21TaskFinisherSingletonEERPSH_EEEEESt17_Rb_tree_iteratorISD_ESt23_Rb_tree_const_iteratorISD_EDpOT_.constprop.0 (librbd.so.1 + 0x51126b)
                                                    #7  0x00007f7788b257a6 _ZN6librbd8ImageCtx4initEv (librbd.so.1 + 0x13e7a6)
                                                    #8  0x00007f7788bff2d3 _ZN6librbd5image11OpenRequestINS_8ImageCtxEE12send_refreshEv (librbd.so.1 + 0x2182d3)
                                                    #9  0x00007f7788bfff46 _ZN6librbd5image11OpenRequestINS_8ImageCtxEE23handle_v2_get_data_poolEPi (librbd.so.1 + 0x218f46)
                                                    #10 0x00007f7788c002a7 _ZN6librbd4util6detail20rados_state_callbackINS_5image11OpenRequestINS_8ImageCtxEEEXadL_ZNS6_23handle_v2_get_data_poolEPiEELb1EEEvPvS8_ (librbd.so.1 + 0x2192a7)
                                                    #11 0x00007f77888fe0ac _ZN5boost4asio6detail18completion_handlerINS1_7binder0IN8librados14CB_AioCompleteEEENS0_10io_context19basic_executor_typeISaIvELm0EEEE11do_completeEPvPNS1_19scheduler_operationERKNS_6system10error_codeEm (librados.so.2 + 0xad0ac)
                                                    #12 0x00007f77888fd585 _ZN5boost4asio6detail14strand_service11do_completeEPvPNS1_19scheduler_operationERKNS_6system10error_codeEm (librados.so.2 + 0xac585)
                                                    #13 0x00007f7788978498 _ZN5boost4asio6detail9scheduler3runERNS_6system10error_codeE.constprop.0.isra.0 (librados.so.2 + 0x127498)
                                                    #14 0x00007f77889174e4 _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJZ17make_named_threadIZN4ceph5async15io_context_pool5startEsEUlvE_JEES_St17basic_string_viewIcSt11char_traitsIcEEOT_DpOT0_EUlSD_SG_E_S7_EEEEE6_M_runEv (librados.so.2 + 0xc64e4)
                                                    #15 0x00007f7783582ae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #16 0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #17 0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262880:
                                                    #0  0x00007f778ae1338a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f778ae158e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f778357c6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f7788b2ceb3 _ZN6librbd10ImageStateINS_8ImageCtxEE4openEm (librbd.so.1 + 0x145eb3)
                                                    #4  0x00007f7788afcfcb rbd_open (librbd.so.1 + 0x115fcb)
                                                    #5  0x00007f77890a789d qemu_rbd_open (block-rbd.so + 0x489d)
                                                    #6  0x0000557cd48f925c bdrv_open_driver.llvm.1535778247189356743 (qemu-img + 0x6025c)
                                                    #7  0x0000557cd48fe4b7 bdrv_open_inherit.llvm.1535778247189356743 (qemu-img + 0x654b7)
                                                    #8  0x0000557cd490bde1 bdrv_open_child_bs.llvm.1535778247189356743 (qemu-img + 0x72de1)
                                                    #9  0x0000557cd48fdc36 bdrv_open_inherit.llvm.1535778247189356743 (qemu-img + 0x64c36)
                                                    #10 0x0000557cd492d4b3 blk_new_open (qemu-img + 0x944b3)
                                                    #11 0x0000557cd49ed516 img_open_file (qemu-img + 0x154516)
                                                    #12 0x0000557cd49ed0c0 img_open (qemu-img + 0x1540c0)
                                                    #13 0x0000557cd49e903b img_info (qemu-img + 0x15003b)
                                                    #14 0x0000557cd49e26ca main (qemu-img + 0x1496ca)
                                                    #15 0x00007f778adb5610 __libc_start_call_main (libc.so.6 + 0x2a610)
                                                    #16 0x00007f778adb56c0 __libc_start_main@@GLIBC_2.34 (libc.so.6 + 0x2a6c0)
                                                    #17 0x0000557cd48e8285 _start (qemu-img + 0x4f285)
                                                    
                                                    Stack trace of thread 262882:
                                                    #0  0x00007f778ae1338a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f778ae158e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f778357c6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f7783b3b0a2 _ZN4ceph7logging3Log5entryEv (libceph-common.so.2 + 0x46b0a2)
                                                    #4  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #5  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262898:
                                                    #0  0x00007f778ae242a2 sysmalloc (libc.so.6 + 0x992a2)
                                                    #1  0x00007f778ae24e67 _int_malloc (libc.so.6 + 0x99e67)
                                                    #2  0x00007f778ae25821 tcache_init.part.0 (libc.so.6 + 0x9a821)
                                                    #3  0x00007f778ae25f7e __libc_malloc (libc.so.6 + 0x9af7e)
                                                    #4  0x00007f778b521e7e malloc (ld-linux-x86-64.so.2 + 0x12e7e)
                                                    #5  0x00007f778b525eec __tls_get_addr (ld-linux-x86-64.so.2 + 0x16eec)
                                                    #6  0x00007f778396c5a4 ceph_pthread_setname (libceph-common.so.2 + 0x29c5a4)
                                                    #7  0x00007f7783928f38 _ZN6Thread13entry_wrapperEv (libceph-common.so.2 + 0x258f38)
                                                    #8  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #9  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262881:
                                                    #0  0x00007f778ae9382d syscall (libc.so.6 + 0x10882d)
                                                    #1  0x0000557cd4a73193 qemu_event_wait (qemu-img + 0x1da193)
                                                    #2  0x0000557cd4a7e2e7 call_rcu_thread (qemu-img + 0x1e52e7)
                                                    #3  0x0000557cd4a712aa qemu_thread_start.llvm.12875871551448449403 (qemu-img + 0x1d82aa)
                                                    #4  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #5  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262897:
                                                    #0  0x00007f778ae1338a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f778ae158e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f778357c6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f778392e7f8 _ZN15CommonSafeTimerISt5mutexE12timer_threadEv (libceph-common.so.2 + 0x25e7f8)
                                                    #4  0x00007f778392ef81 _ZN21CommonSafeTimerThreadISt5mutexE5entryEv (libceph-common.so.2 + 0x25ef81)
                                                    #5  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262883:
                                                    #0  0x00007f778ae9aa3e epoll_wait (libc.so.6 + 0x10fa3e)
                                                    #1  0x00007f7783b10618 _ZN11EpollDriver10event_waitERSt6vectorI14FiredFileEventSaIS1_EEP7timeval (libceph-common.so.2 + 0x440618)
                                                    #2  0x00007f7783b0e702 _ZN11EventCenter14process_eventsEjPNSt6chrono8durationImSt5ratioILl1ELl1000000000EEEE (libceph-common.so.2 + 0x43e702)
                                                    #3  0x00007f7783b0f2c6 _ZNSt17_Function_handlerIFvvEZN12NetworkStack10add_threadEP6WorkerEUlvE_E9_M_invokeERKSt9_Any_data (libceph-common.so.2 + 0x43f2c6)
                                                    #4  0x00007f7783582ae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #5  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262892:
                                                    #0  0x00007f778ae1338a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f778ae15cc0 pthread_cond_clockwait@GLIBC_2.30 (libc.so.6 + 0x8acc0)
                                                    #2  0x00007f7788950364 _ZN4ceph5timerINS_17coarse_mono_clockEE12timer_threadEv (librados.so.2 + 0xff364)
                                                    #3  0x00007f7783582ae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #4  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #5  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262889:
                                                    #0  0x00007f778ae1338a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f778ae15cc0 pthread_cond_clockwait@GLIBC_2.30 (libc.so.6 + 0x8acc0)
                                                    #2  0x00007f7783949150 _ZN4ceph6common24CephContextServiceThread5entryEv (libceph-common.so.2 + 0x279150)
                                                    #3  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #4  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262891:
                                                    #0  0x00007f778ae1338a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f778ae158e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f7788978266 _ZN5boost4asio6detail9scheduler3runERNS_6system10error_codeE.constprop.0.isra.0 (librados.so.2 + 0x127266)
                                                    #3  0x00007f77889174e4 _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJZ17make_named_threadIZN4ceph5async15io_context_pool5startEsEUlvE_JEES_St17basic_string_viewIcSt11char_traitsIcEEOT_DpOT0_EUlSD_SG_E_S7_EEEEE6_M_runEv (librados.so.2 + 0xc64e4)
                                                    #4  0x00007f7783582ae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #5  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262896:
                                                    #0  0x00007f778ae1338a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f778ae158e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f778357c6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f778392e7f8 _ZN15CommonSafeTimerISt5mutexE12timer_threadEv (libceph-common.so.2 + 0x25e7f8)
                                                    #4  0x00007f778392ef81 _ZN21CommonSafeTimerThreadISt5mutexE5entryEv (libceph-common.so.2 + 0x25ef81)
                                                    #5  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262893:
                                                    #0  0x00007f778ae1338a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f778ae158e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f778357c6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f7783a3749f _ZN13DispatchQueue5entryEv (libceph-common.so.2 + 0x36749f)
                                                    #4  0x00007f7783ac8411 _ZN13DispatchQueue14DispatchThread5entryEv (libceph-common.so.2 + 0x3f8411)
                                                    #5  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262885:
                                                    #0  0x00007f778ae9aa3e epoll_wait (libc.so.6 + 0x10fa3e)
                                                    #1  0x00007f7783b10618 _ZN11EpollDriver10event_waitERSt6vectorI14FiredFileEventSaIS1_EEP7timeval (libceph-common.so.2 + 0x440618)
                                                    #2  0x00007f7783b0e702 _ZN11EventCenter14process_eventsEjPNSt6chrono8durationImSt5ratioILl1ELl1000000000EEEE (libceph-common.so.2 + 0x43e702)
                                                    #3  0x00007f7783b0f2c6 _ZNSt17_Function_handlerIFvvEZN12NetworkStack10add_threadEP6WorkerEUlvE_E9_M_invokeERKSt9_Any_data (libceph-common.so.2 + 0x43f2c6)
                                                    #4  0x00007f7783582ae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #5  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262894:
                                                    #0  0x00007f778ae1338a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f778ae158e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f778357c6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f7783a370b9 _ZN13DispatchQueue18run_local_deliveryEv (libceph-common.so.2 + 0x3670b9)
                                                    #4  0x00007f7783ac8431 _ZN13DispatchQueue19LocalDeliveryThread5entryEv (libceph-common.so.2 + 0x3f8431)
                                                    #5  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262895:
                                                    #0  0x00007f778ae1338a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f778ae15cc0 pthread_cond_clockwait@GLIBC_2.30 (libc.so.6 + 0x8acc0)
                                                    #2  0x00007f778392eb23 _ZN15CommonSafeTimerISt5mutexE12timer_threadEv (libceph-common.so.2 + 0x25eb23)
                                                    #3  0x00007f778392ef81 _ZN21CommonSafeTimerThreadISt5mutexE5entryEv (libceph-common.so.2 + 0x25ef81)
                                                    #4  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #5  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262884:
                                                    #0  0x00007f778ae9aa3e epoll_wait (libc.so.6 + 0x10fa3e)
                                                    #1  0x00007f7783b10618 _ZN11EpollDriver10event_waitERSt6vectorI14FiredFileEventSaIS1_EEP7timeval (libceph-common.so.2 + 0x440618)
                                                    #2  0x00007f7783b0e702 _ZN11EventCenter14process_eventsEjPNSt6chrono8durationImSt5ratioILl1ELl1000000000EEEE (libceph-common.so.2 + 0x43e702)
                                                    #3  0x00007f7783b0f2c6 _ZNSt17_Function_handlerIFvvEZN12NetworkStack10add_threadEP6WorkerEUlvE_E9_M_invokeERKSt9_Any_data (libceph-common.so.2 + 0x43f2c6)
                                                    #4  0x00007f7783582ae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #5  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    ELF object binary architecture: AMD x86-64

● session-1.scope - Session 1 of User zuul
     Loaded: loaded (/run/systemd/transient/session-1.scope; transient)
  Transient: yes
     Active: active (abandoned) since Tue 2025-12-02 10:04:12 UTC; 1h 35min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 17.1M (peak: 39.2M)
        CPU: 1min 8.796s
     CGroup: /user.slice/user-1000.slice/session-1.scope
             └─4520 /usr/bin/python3

Dec 02 10:05:59 np0005542249.novalocal python3[7141]: ansible-ansible.legacy.copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1764669958.5467687-102-127106510555169/source dest=/etc/NetworkManager/system-connections/ci-private-network.nmconnection mode=0600 owner=root group=root follow=False _original_basename=bootstrap-ci-network-nm-connection.nmconnection.j2 checksum=77fa4f5893f92e45970fb0039bd711512da7362c backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Dec 02 10:05:59 np0005542249.novalocal sudo[7139]: pam_unix(sudo:session): session closed for user root
Dec 02 10:05:59 np0005542249.novalocal sudo[7189]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-dxuhlhpydyqcdmsromdebsupgobabzsc ; OS_CLOUD=vexxhost /usr/bin/python3'
Dec 02 10:05:59 np0005542249.novalocal sudo[7189]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Dec 02 10:05:59 np0005542249.novalocal python3[7191]: ansible-ansible.builtin.systemd Invoked with name=NetworkManager state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Dec 02 10:06:00 np0005542249.novalocal sudo[7189]: pam_unix(sudo:session): session closed for user root
Dec 02 10:06:00 np0005542249.novalocal python3[7275]: ansible-ansible.legacy.command Invoked with _raw_params=ip route zuul_log_id=fa163e3b-3c83-127d-a3b6-0000000000a7-0-controller zuul_ansible_split_streams=False _uses_shell=False warn=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Dec 02 10:07:00 np0005542249.novalocal sshd-session[4319]: Received disconnect from 38.102.83.114 port 57578:11: disconnected by user
Dec 02 10:07:00 np0005542249.novalocal sshd-session[4319]: Disconnected from user zuul 38.102.83.114 port 57578
Dec 02 10:07:00 np0005542249.novalocal sshd-session[4306]: pam_unix(sshd:session): session closed for user zuul

● session-21.scope - Session 21 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-21.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-02 10:49:32 UTC; 49min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 1.3M)
        CPU: 117ms
     CGroup: /user.slice/user-42477.slice/session-21.scope
             ├─76630 "sshd-session: ceph-admin [priv]"
             └─76652 "sshd-session: ceph-admin"

Dec 02 10:49:32 compute-0 systemd[1]: Started Session 21 of User ceph-admin.

● session-23.scope - Session 23 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-23.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-02 10:49:32 UTC; 49min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.1M)
        CPU: 259ms
     CGroup: /user.slice/user-42477.slice/session-23.scope
             ├─76647 "sshd-session: ceph-admin [priv]"
             └─76653 "sshd-session: ceph-admin@notty"

Dec 02 10:49:32 compute-0 systemd[1]: Started Session 23 of User ceph-admin.
Dec 02 10:49:32 compute-0 sudo[76654]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Dec 02 10:49:32 compute-0 sudo[76654]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 02 10:49:32 compute-0 sudo[76654]: pam_unix(sudo:session): session closed for user root
Dec 02 10:49:32 compute-0 sudo[76679]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Dec 02 10:49:32 compute-0 sudo[76679]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 02 10:49:32 compute-0 sudo[76679]: pam_unix(sudo:session): session closed for user root

● session-24.scope - Session 24 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-24.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-02 10:49:33 UTC; 49min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 5.9M)
        CPU: 273ms
     CGroup: /user.slice/user-42477.slice/session-24.scope
             ├─76704 "sshd-session: ceph-admin [priv]"
             └─76707 "sshd-session: ceph-admin@notty"

Dec 02 10:49:33 compute-0 systemd[1]: Started Session 24 of User ceph-admin.
Dec 02 10:49:33 compute-0 sudo[76708]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Dec 02 10:49:33 compute-0 sudo[76708]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 02 10:49:33 compute-0 sudo[76708]: pam_unix(sudo:session): session closed for user root
Dec 02 10:49:33 compute-0 sudo[76733]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/95bc4eaa-1a14-59bf-acf2-4b3da055547d/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 check-host --expect-hostname compute-0
Dec 02 10:49:33 compute-0 sudo[76733]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 02 10:49:33 compute-0 sudo[76733]: pam_unix(sudo:session): session closed for user root

● session-25.scope - Session 25 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-25.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-02 10:49:33 UTC; 49min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.1M)
        CPU: 268ms
     CGroup: /user.slice/user-42477.slice/session-25.scope
             ├─76758 "sshd-session: ceph-admin [priv]"
             └─76761 "sshd-session: ceph-admin@notty"

Dec 02 10:49:33 compute-0 systemd[1]: Started Session 25 of User ceph-admin.
Dec 02 10:49:33 compute-0 sudo[76762]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Dec 02 10:49:33 compute-0 sudo[76762]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 02 10:49:33 compute-0 sudo[76762]: pam_unix(sudo:session): session closed for user root
Dec 02 10:49:33 compute-0 sudo[76787]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /var/lib/ceph/95bc4eaa-1a14-59bf-acf2-4b3da055547d/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d
Dec 02 10:49:33 compute-0 sudo[76787]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 02 10:49:33 compute-0 sudo[76787]: pam_unix(sudo:session): session closed for user root

● session-26.scope - Session 26 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-26.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-02 10:49:35 UTC; 49min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.0M)
        CPU: 294ms
     CGroup: /user.slice/user-42477.slice/session-26.scope
             ├─76812 "sshd-session: ceph-admin [priv]"
             └─76816 "sshd-session: ceph-admin@notty"

Dec 02 10:49:35 compute-0 systemd[1]: Started Session 26 of User ceph-admin.
Dec 02 10:49:35 compute-0 sshd-session[76812]: pam_unix(sshd:session): session opened for user ceph-admin(uid=42477) by ceph-admin(uid=0)
Dec 02 10:49:35 compute-0 sudo[76817]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Dec 02 10:49:35 compute-0 sudo[76817]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 02 10:49:35 compute-0 sudo[76817]: pam_unix(sudo:session): session closed for user root
Dec 02 10:49:35 compute-0 sudo[76842]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /var/lib/ceph/95bc4eaa-1a14-59bf-acf2-4b3da055547d
Dec 02 10:49:35 compute-0 sudo[76842]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 02 10:49:35 compute-0 sudo[76842]: pam_unix(sudo:session): session closed for user root

● session-27.scope - Session 27 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-27.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-02 10:49:35 UTC; 49min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.0M)
        CPU: 294ms
     CGroup: /user.slice/user-42477.slice/session-27.scope
             ├─76867 "sshd-session: ceph-admin [priv]"
             └─76870 "sshd-session: ceph-admin@notty"

Dec 02 10:49:35 compute-0 systemd[1]: Started Session 27 of User ceph-admin.
Dec 02 10:49:35 compute-0 sudo[76871]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Dec 02 10:49:35 compute-0 sudo[76871]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 02 10:49:35 compute-0 sudo[76871]: pam_unix(sudo:session): session closed for user root
Dec 02 10:49:35 compute-0 sudo[76896]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /tmp/cephadm-95bc4eaa-1a14-59bf-acf2-4b3da055547d/var/lib/ceph/95bc4eaa-1a14-59bf-acf2-4b3da055547d
Dec 02 10:49:35 compute-0 sudo[76896]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 02 10:49:35 compute-0 sudo[76896]: pam_unix(sudo:session): session closed for user root

● session-28.scope - Session 28 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-28.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-02 10:49:35 UTC; 49min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.3M)
        CPU: 298ms
     CGroup: /user.slice/user-42477.slice/session-28.scope
             ├─76921 "sshd-session: ceph-admin [priv]"
             └─76924 "sshd-session: ceph-admin@notty"

Dec 02 10:49:35 compute-0 systemd[1]: Started Session 28 of User ceph-admin.
Dec 02 10:49:36 compute-0 sudo[76925]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Dec 02 10:49:36 compute-0 sudo[76925]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 02 10:49:36 compute-0 sudo[76925]: pam_unix(sudo:session): session closed for user root
Dec 02 10:49:36 compute-0 sudo[76950]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/touch /tmp/cephadm-95bc4eaa-1a14-59bf-acf2-4b3da055547d/var/lib/ceph/95bc4eaa-1a14-59bf-acf2-4b3da055547d/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new
Dec 02 10:49:36 compute-0 sudo[76950]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 02 10:49:36 compute-0 sudo[76950]: pam_unix(sudo:session): session closed for user root

● session-29.scope - Session 29 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-29.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-02 10:49:36 UTC; 49min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.4M)
        CPU: 293ms
     CGroup: /user.slice/user-42477.slice/session-29.scope
             ├─76975 "sshd-session: ceph-admin [priv]"
             └─76978 "sshd-session: ceph-admin@notty"

Dec 02 10:49:36 compute-0 systemd[1]: Started Session 29 of User ceph-admin.
Dec 02 10:49:36 compute-0 sudo[76979]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Dec 02 10:49:36 compute-0 sudo[76979]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 02 10:49:36 compute-0 sudo[76979]: pam_unix(sudo:session): session closed for user root
Dec 02 10:49:36 compute-0 sudo[77004]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chown -R ceph-admin /tmp/cephadm-95bc4eaa-1a14-59bf-acf2-4b3da055547d
Dec 02 10:49:36 compute-0 sudo[77004]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 02 10:49:36 compute-0 sudo[77004]: pam_unix(sudo:session): session closed for user root

● session-30.scope - Session 30 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-30.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-02 10:49:36 UTC; 49min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.3M)
        CPU: 299ms
     CGroup: /user.slice/user-42477.slice/session-30.scope
             ├─77029 "sshd-session: ceph-admin [priv]"
             └─77032 "sshd-session: ceph-admin@notty"

Dec 02 10:49:36 compute-0 systemd[1]: Started Session 30 of User ceph-admin.
Dec 02 10:49:37 compute-0 sudo[77033]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Dec 02 10:49:37 compute-0 sudo[77033]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 02 10:49:37 compute-0 sudo[77033]: pam_unix(sudo:session): session closed for user root
Dec 02 10:49:37 compute-0 sudo[77058]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chmod 644 /tmp/cephadm-95bc4eaa-1a14-59bf-acf2-4b3da055547d/var/lib/ceph/95bc4eaa-1a14-59bf-acf2-4b3da055547d/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new
Dec 02 10:49:37 compute-0 sudo[77058]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 02 10:49:37 compute-0 sudo[77058]: pam_unix(sudo:session): session closed for user root

● session-31.scope - Session 31 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-31.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-02 10:49:37 UTC; 49min ago
         IO: 0B read, 216.0K written
      Tasks: 2
     Memory: 1.4M (peak: 3.5M)
        CPU: 159ms
     CGroup: /user.slice/user-42477.slice/session-31.scope
             ├─77083 "sshd-session: ceph-admin [priv]"
             └─77086 "sshd-session: ceph-admin@notty"

Dec 02 10:49:37 compute-0 systemd[1]: Started Session 31 of User ceph-admin.

● session-32.scope - Session 32 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-32.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-02 10:49:37 UTC; 49min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.3M)
        CPU: 233ms
     CGroup: /user.slice/user-42477.slice/session-32.scope
             ├─77110 "sshd-session: ceph-admin [priv]"
             └─77113 "sshd-session: ceph-admin@notty"

Dec 02 10:49:37 compute-0 systemd[1]: Started Session 32 of User ceph-admin.
Dec 02 10:49:38 compute-0 sudo[77114]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Dec 02 10:49:38 compute-0 sudo[77114]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 02 10:49:38 compute-0 sudo[77114]: pam_unix(sudo:session): session closed for user root
Dec 02 10:49:38 compute-0 sudo[77139]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mv /tmp/cephadm-95bc4eaa-1a14-59bf-acf2-4b3da055547d/var/lib/ceph/95bc4eaa-1a14-59bf-acf2-4b3da055547d/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new /var/lib/ceph/95bc4eaa-1a14-59bf-acf2-4b3da055547d/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d
Dec 02 10:49:38 compute-0 sudo[77139]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 02 10:49:38 compute-0 sudo[77139]: pam_unix(sudo:session): session closed for user root

● session-33.scope - Session 33 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-33.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-02 10:49:38 UTC; 49min ago
         IO: 1.0M read, 154.6M written
      Tasks: 2
     Memory: 5.6M (peak: 56.1M)
        CPU: 4min 42.760s
     CGroup: /user.slice/user-42477.slice/session-33.scope
             ├─77164 "sshd-session: ceph-admin [priv]"
             └─77167 "sshd-session: ceph-admin@notty"

Dec 02 11:38:16 compute-0 podman[301005]: 2025-12-02 11:38:16.985904003 +0000 UTC m=+0.198588557 container attach c00ce674ac8e3776d6d6b71f1a07e4148b4ecc2421b1cc3c193f03c4617a4dc2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_solomon, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default)
Dec 02 11:38:18 compute-0 podman[301005]: 2025-12-02 11:38:18.160765791 +0000 UTC m=+1.373450435 container died c00ce674ac8e3776d6d6b71f1a07e4148b4ecc2421b1cc3c193f03c4617a4dc2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_solomon, ceph=True, org.label-schema.license=GPLv2, CEPH_REF=reef, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Dec 02 11:38:18 compute-0 podman[301005]: 2025-12-02 11:38:18.236208736 +0000 UTC m=+1.448893290 container remove c00ce674ac8e3776d6d6b71f1a07e4148b4ecc2421b1cc3c193f03c4617a4dc2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_solomon, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image)
Dec 02 11:38:18 compute-0 sudo[300896]: pam_unix(sudo:session): session closed for user root
Dec 02 11:38:18 compute-0 sudo[301069]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Dec 02 11:38:18 compute-0 sudo[301069]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 02 11:38:18 compute-0 sudo[301069]: pam_unix(sudo:session): session closed for user root
Dec 02 11:38:18 compute-0 sudo[301094]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Dec 02 11:38:18 compute-0 sudo[301094]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 02 11:38:18 compute-0 sudo[301094]: pam_unix(sudo:session): session closed for user root

● session-52.scope - Session 52 of User zuul
     Loaded: loaded (/run/systemd/transient/session-52.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-02 11:38:22 UTC; 51s ago
         IO: 491.4M read, 74.9M written
      Tasks: 13
     Memory: 863.8M (peak: 949.4M)
        CPU: 2min 20.194s
     CGroup: /user.slice/user-1000.slice/session-52.scope
             ├─301119 "sshd-session: zuul [priv]"
             ├─301122 "sshd-session: zuul@notty"
             ├─301123 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             ├─301147 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             ├─307235 timeout 15s turbostat --debug sleep 10
             ├─307887 timeout 300s systemctl status --all
             └─307888 systemctl status --all

Dec 02 11:38:22 compute-0 systemd[1]: Started Session 52 of User zuul.
Dec 02 11:38:22 compute-0 sudo[301123]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Dec 02 11:38:22 compute-0 sudo[301123]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Dec 02 11:38:33 compute-0 ovs-vsctl[301477]: ovs|00001|db_ctl_base|ERR|no key "dpdk-init" in Open_vSwitch record "." column other_config
Dec 02 11:39:04 compute-0 ovs-appctl[306389]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Dec 02 11:39:04 compute-0 ovs-appctl[306399]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

○ 130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193-746b1df9bc05c1b6.service - /usr/bin/podman healthcheck run 130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193
     Loaded: loaded (/run/systemd/transient/130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193-746b1df9bc05c1b6.service; transient)
  Transient: yes
     Active: inactive (dead) since Tue 2025-12-02 11:39:01 UTC; 12s ago
   Duration: 111ms
TriggeredBy: ● 130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193-746b1df9bc05c1b6.timer
    Process: 305275 ExecStart=/usr/bin/podman healthcheck run 130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193 (code=exited, status=0/SUCCESS)
   Main PID: 305275 (code=exited, status=0/SUCCESS)
        CPU: 96ms

Dec 02 11:39:01 compute-0 podman[305275]: 2025-12-02 11:39:01.011905272 +0000 UTC m=+0.083011840 container health_status 130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193 (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, tcib_managed=true, config_id=multipathd, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_build_tag=fa2bb8efef6782c26ea7f1675eeb36dd, container_name=multipathd, managed_by=edpm_ansible, org.label-schema.build-date=20251125, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']})

○ 301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193-29801a8faae2132d.service - /usr/bin/podman healthcheck run 301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193
     Loaded: loaded (/run/systemd/transient/301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193-29801a8faae2132d.service; transient)
  Transient: yes
     Active: inactive (dead) since Tue 2025-12-02 11:39:10 UTC; 3s ago
   Duration: 84ms
TriggeredBy: ● 301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193-29801a8faae2132d.timer
    Process: 307436 ExecStart=/usr/bin/podman healthcheck run 301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193 (code=exited, status=0/SUCCESS)
   Main PID: 307436 (code=exited, status=0/SUCCESS)
        CPU: 80ms

Dec 02 11:39:09 compute-0 podman[307436]: 2025-12-02 11:39:09.99259343 +0000 UTC m=+0.061665094 container health_status 301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251125, 
org.label-schema.vendor=CentOS, tcib_build_tag=fa2bb8efef6782c26ea7f1675eeb36dd, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_id=ovn_metadata_agent, org.label-schema.license=GPLv2)

○ 5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998-3c4e81cff09bf014.service - /usr/bin/podman healthcheck run 5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998
     Loaded: loaded (/run/systemd/transient/5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998-3c4e81cff09bf014.service; transient)
  Transient: yes
     Active: inactive (dead) since Tue 2025-12-02 11:39:10 UTC; 3s ago
   Duration: 141ms
TriggeredBy: ● 5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998-3c4e81cff09bf014.timer
    Process: 307437 ExecStart=/usr/bin/podman healthcheck run 5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998 (code=exited, status=0/SUCCESS)
   Main PID: 307437 (code=exited, status=0/SUCCESS)
        CPU: 79ms

Dec 02 11:39:10 compute-0 podman[307437]: 2025-12-02 11:39:10.054366307 +0000 UTC m=+0.123551454 container health_status 5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, container_name=ovn_controller, org.label-schema.build-date=20251125, managed_by=edpm_ansible, org.label-scheUnit apparmor.service could not be found.
Unit apt-daily.service could not be found.
Unit auto-cpufreq.service could not be found.
Unit autofs.service could not be found.
ma.license=GPLv2, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=fa2bb8efef6782c26ea7f1675eeb36dd)

● auditd.service - Security Auditing Service
     Loaded: loaded (/usr/lib/systemd/system/auditd.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:auditd(8)
             https://github.com/linux-audit/audit-documentation
   Main PID: 702 (auditd)
         IO: 4.0K read, 27.5M written
      Tasks: 4 (limit: 48628)
     Memory: 16.1M (peak: 16.6M)
        CPU: 7.253s
     CGroup: /system.slice/auditd.service
             ├─702 /sbin/auditd
             └─704 /usr/sbin/sedispatch

Dec 02 10:03:32 localhost augenrules[722]: failure 1
Dec 02 10:03:32 localhost augenrules[722]: pid 702
Dec 02 10:03:32 localhost augenrules[722]: rate_limit 0
Dec 02 10:03:32 localhost augenrules[722]: backlog_limit 8192
Dec 02 10:03:32 localhost augenrules[722]: lost 0
Dec 02 10:03:32 localhost augenrules[722]: backlog 0
Dec 02 10:03:32 localhost augenrules[722]: backlog_wait_time 60000
Dec 02 10:03:32 localhost augenrules[722]: backlog_wait_time_actual 0
Dec 02 10:03:32 localhost systemd[1]: Started Security Auditing Service.
Dec 02 11:04:01 compute-0 auditd[702]: Audit daemon rotating log files

○ auth-rpcgss-module.service - Kernel Module supporting RPCSEC_GSS
     Loaded: loaded (/usr/lib/systemd/system/auth-rpcgss-module.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-02 10:03:31 UTC; 1h 35min ago

● ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@crash.compute-0.service - Ceph crash.compute-0 for 95bc4eaa-1a14-59bf-acf2-4b3da055547d
     Loaded: loaded (/etc/systemd/system/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-02 10:49:54 UTC; 49min ago
   Main PID: 83160 (conmon)
         IO: 0B read, 1.1M written
      Tasks: 3 (limit: 48628)
     Memory: 12.2M (peak: 33.3M)
        CPU: 751ms
     CGroup: /system.slice/system-ceph\x2d95bc4eaa\x2d1a14\x2d59bf\x2dacf2\x2d4b3da055547d.slice/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@crash.compute-0.service
             ├─libpod-payload-3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b
             │ ├─83162 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ └─83164 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             └─runtime
               └─83160 /usr/bin/conmon --api-version 1 -c 3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b -u 3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b/userdata -p /run/containers/storage/overlay-containers/3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b

Dec 02 10:49:54 compute-0 systemd[1]: Started Ceph crash.compute-0 for 95bc4eaa-1a14-59bf-acf2-4b3da055547d.
Dec 02 10:49:54 compute-0 ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-crash-compute-0[83160]: INFO:ceph-crash:pinging cluster to exercise our key
Dec 02 10:49:54 compute-0 ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-crash-compute-0[83160]: 2025-12-02T10:49:54.957+0000 7f4975b83640 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory
Dec 02 10:49:54 compute-0 ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-crash-compute-0[83160]: 2025-12-02T10:49:54.957+0000 7f4975b83640 -1 AuthRegistry(0x7f4970066fe0) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx
Dec 02 10:49:54 compute-0 ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-crash-compute-0[83160]: 2025-12-02T10:49:54.958+0000 7f4975b83640 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory
Dec 02 10:49:54 compute-0 ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-crash-compute-0[83160]: 2025-12-02T10:49:54.958+0000 7f4975b83640 -1 AuthRegistry(0x7f4975b82000) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx
Dec 02 10:49:54 compute-0 ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-crash-compute-0[83160]: 2025-12-02T10:49:54.959+0000 7f496f7fe640 -1 monclient(hunting): handle_auth_bad_method server allowed_methods [2] but i only support [1]
Dec 02 10:49:54 compute-0 ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-crash-compute-0[83160]: 2025-12-02T10:49:54.959+0000 7f4975b83640 -1 monclient: authenticate NOTE: no keyring found; disabled cephx authentication
Dec 02 10:49:54 compute-0 ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-crash-compute-0[83160]: [errno 13] RADOS permission denied (error connecting to the cluster)
Dec 02 10:49:54 compute-0 ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-crash-compute-0[83160]: INFO:ceph-crash:monitoring path /var/lib/ceph/crash, delay 600s

● ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mds.cephfs.compute-0.bydekr.service - Ceph mds.cephfs.compute-0.bydekr for 95bc4eaa-1a14-59bf-acf2-4b3da055547d
     Loaded: loaded (/etc/systemd/system/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-02 10:51:52 UTC; 47min ago
   Main PID: 101610 (conmon)
         IO: 0B read, 1.3M written
      Tasks: 28 (limit: 48628)
     Memory: 25.9M (peak: 26.7M)
        CPU: 6.339s
     CGroup: /system.slice/system-ceph\x2d95bc4eaa\x2d1a14\x2d59bf\x2dacf2\x2d4b3da055547d.slice/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mds.cephfs.compute-0.bydekr.service
             ├─libpod-payload-f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c
             │ ├─101612 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.bydekr -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─101614 /usr/bin/ceph-mds -n mds.cephfs.compute-0.bydekr -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─101610 /usr/bin/conmon --api-version 1 -c f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c -u f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c/userdata -p /run/containers/storage/overlay-containers/f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-mds-cephfs-compute-0-bydekr --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mds.cephfs.compute-0.bydekr.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c

Dec 02 11:38:36 compute-0 ceph-mds[101614]: mds.cephfs.compute-0.bydekr asok_command: dump loads {prefix=dump loads} (starting...)
Dec 02 11:38:36 compute-0 ceph-mds[101614]: mds.cephfs.compute-0.bydekr asok_command: dump tree {prefix=dump tree,root=/} (starting...)
Dec 02 11:38:36 compute-0 ceph-mds[101614]: mds.cephfs.compute-0.bydekr asok_command: dump_blocked_ops {prefix=dump_blocked_ops} (starting...)
Dec 02 11:38:37 compute-0 ceph-mds[101614]: mds.cephfs.compute-0.bydekr asok_command: dump_historic_ops {prefix=dump_historic_ops} (starting...)
Dec 02 11:38:37 compute-0 ceph-mds[101614]: mds.cephfs.compute-0.bydekr asok_command: dump_historic_ops_by_duration {prefix=dump_historic_ops_by_duration} (starting...)
Dec 02 11:38:37 compute-0 ceph-mds[101614]: mds.cephfs.compute-0.bydekr asok_command: dump_ops_in_flight {prefix=dump_ops_in_flight} (starting...)
Dec 02 11:38:37 compute-0 ceph-mds[101614]: mds.cephfs.compute-0.bydekr asok_command: get subtrees {prefix=get subtrees} (starting...)
Dec 02 11:38:37 compute-0 ceph-mds[101614]: mds.cephfs.compute-0.bydekr asok_command: ops {prefix=ops} (starting...)
Dec 02 11:38:38 compute-0 ceph-mds[101614]: mds.cephfs.compute-0.bydekr asok_command: session ls {prefix=session ls} (starting...)
Dec 02 11:38:38 compute-0 ceph-mds[101614]: mds.cephfs.compute-0.bydekr asok_command: status {prefix=status} (starting...)

● ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mgr.compute-0.ntxcvs.service - Ceph mgr.compute-0.ntxcvs for 95bc4eaa-1a14-59bf-acf2-4b3da055547d
     Loaded: loaded (/etc/systemd/system/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-02 10:48:41 UTC; 50min ago
   Main PID: 75368 (conmon)
         IO: 804.0K read, 2.7M written
      Tasks: 149 (limit: 48628)
     Memory: 532.5M (peak: 533.9M)
        CPU: 1min 30.431s
     CGroup: /system.slice/system-ceph\x2d95bc4eaa\x2d1a14\x2d59bf\x2dacf2\x2d4b3da055547d.slice/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mgr.compute-0.ntxcvs.service
             ├─libpod-payload-e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093
             │ ├─75370 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.ntxcvs -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─75372 /usr/bin/ceph-mgr -n mgr.compute-0.ntxcvs -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─75368 /usr/bin/conmon --api-version 1 -c e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093 -u e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093/userdata -p /run/containers/storage/overlay-containers/e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-mgr-compute-0-ntxcvs --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mgr.compute-0.ntxcvs.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093

Dec 02 11:39:09 compute-0 ceph-mgr[75372]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Dec 02 11:39:09 compute-0 ceph-mgr[75372]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Dec 02 11:39:09 compute-0 ceph-mgr[75372]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Dec 02 11:39:09 compute-0 ceph-mgr[75372]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Dec 02 11:39:09 compute-0 ceph-mgr[75372]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Dec 02 11:39:09 compute-0 ceph-mgr[75372]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Dec 02 11:39:10 compute-0 ceph-mgr[75372]: log_channel(audit) log [DBG] : from='client.19419 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Dec 02 11:39:10 compute-0 ceph-mgr[75372]: log_channel(cluster) log [DBG] : pgmap v1990: 321 pgs: 321 active+clean; 271 MiB data, 693 MiB used, 59 GiB / 60 GiB avail
Dec 02 11:39:10 compute-0 ceph-mgr[75372]: log_channel(audit) log [DBG] : from='client.19421 -' entity='client.admin' cmd=[{"prefix": "pg stat", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Dec 02 11:39:12 compute-0 ceph-mgr[75372]: log_channel(cluster) log [DBG] : pgmap v1991: 321 pgs: 321 active+clean; 271 MiB data, 693 MiB used, 59 GiB / 60 GiB avail

● ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mon.compute-0.service - Ceph mon.compute-0 for 95bc4eaa-1a14-59bf-acf2-4b3da055547d
     Loaded: loaded (/etc/systemd/system/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-02 10:48:39 UTC; 50min ago
   Main PID: 75077 (conmon)
         IO: 1.5M read, 366.2M written
      Tasks: 27 (limit: 48628)
     Memory: 93.7M (peak: 106.5M)
        CPU: 49.109s
     CGroup: /system.slice/system-ceph\x2d95bc4eaa\x2d1a14\x2d59bf\x2dacf2\x2d4b3da055547d.slice/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mon.compute-0.service
             ├─libpod-payload-cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b
             │ ├─75079 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─75081 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             └─runtime
               └─75077 /usr/bin/conmon --api-version 1 -c cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b -u cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b/userdata -p /run/containers/storage/overlay-containers/cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b

Dec 02 11:39:11 compute-0 ceph-mon[75081]: from='client.19419 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Dec 02 11:39:11 compute-0 ceph-mon[75081]: pgmap v1990: 321 pgs: 321 active+clean; 271 MiB data, 693 MiB used, 59 GiB / 60 GiB avail
Dec 02 11:39:11 compute-0 ceph-mon[75081]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "status", "format": "json-pretty"} v 0) v1
Dec 02 11:39:11 compute-0 ceph-mon[75081]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/424382596' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Dec 02 11:39:11 compute-0 ceph-mon[75081]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "time-sync-status", "format": "json-pretty"} v 0) v1
Dec 02 11:39:11 compute-0 ceph-mon[75081]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/858553117' entity='client.admin' cmd=[{"prefix": "time-sync-status", "format": "json-pretty"}]: dispatch
Dec 02 11:39:12 compute-0 ceph-mon[75081]: from='client.19421 -' entity='client.admin' cmd=[{"prefix": "pg stat", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Dec 02 11:39:12 compute-0 ceph-mon[75081]: from='client.? 192.168.122.100:0/424382596' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Dec 02 11:39:12 compute-0 ceph-mon[75081]: from='client.? 192.168.122.100:0/858553117' entity='client.admin' cmd=[{"prefix": "time-sync-status", "format": "json-pretty"}]: dispatch
Dec 02 11:39:13 compute-0 ceph-mon[75081]: pgmap v1991: 321 pgs: 321 active+clean; 271 MiB data, 693 MiB used, 59 GiB / 60 GiB avail

● ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.0.service - Ceph osd.0 for 95bc4eaa-1a14-59bf-acf2-4b3da055547d
     Loaded: loaded (/etc/systemd/system/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-02 10:50:23 UTC; 48min ago
   Main PID: 88957 (conmon)
         IO: 587.3M read, 8.7G written
      Tasks: 60 (limit: 48628)
     Memory: 1014.8M (peak: 1.3G)
        CPU: 1min 160ms
     CGroup: /system.slice/system-ceph\x2d95bc4eaa\x2d1a14\x2d59bf\x2dacf2\x2d4b3da055547d.slice/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.0.service
             ├─libpod-payload-13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222
             │ ├─88959 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─88961 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─88957 /usr/bin/conmon --api-version 1 -c 13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222 -u 13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222/userdata -p /run/containers/storage/overlay-containers/13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222

Dec 02 11:38:52 compute-0 ceph-osd[88961]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-12-02T11:38:20.632805+0000)
Dec 02 11:38:52 compute-0 ceph-osd[88961]: prioritycache tune_memory target: 4294967296 mapped: 229654528 unmapped: 31604736 heap: 261259264 old mem: 2845415832 new mem: 2845415832
Dec 02 11:38:52 compute-0 ceph-osd[88961]: monclient: tick
Dec 02 11:38:52 compute-0 ceph-osd[88961]: monclient: _check_auth_tickets
Dec 02 11:38:52 compute-0 ceph-osd[88961]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-12-02T11:38:21.632993+0000)
Dec 02 11:38:52 compute-0 ceph-osd[88961]: prioritycache tune_memory target: 4294967296 mapped: 229777408 unmapped: 31481856 heap: 261259264 old mem: 2845415832 new mem: 2845415832
Dec 02 11:38:52 compute-0 ceph-osd[88961]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Dec 02 11:38:52 compute-0 ceph-osd[88961]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Dec 02 11:38:52 compute-0 ceph-osd[88961]: bluestore.MempoolThread(0x55628352bb60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 4002295 data_alloc: 234881024 data_used: 21610496
Dec 02 11:38:52 compute-0 ceph-osd[88961]: do_command 'log dump' '{prefix=log dump}'

● ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.1.service - Ceph osd.1 for 95bc4eaa-1a14-59bf-acf2-4b3da055547d
     Loaded: loaded (/etc/systemd/system/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-02 10:50:29 UTC; 48min ago
   Main PID: 89962 (conmon)
         IO: 559.9M read, 8.4G written
      Tasks: 60 (limit: 48628)
     Memory: 952.9M (peak: 1.4G)
        CPU: 58.359s
     CGroup: /system.slice/system-ceph\x2d95bc4eaa\x2d1a14\x2d59bf\x2dacf2\x2d4b3da055547d.slice/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.1.service
             ├─libpod-payload-2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db
             │ ├─89964 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─89966 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─89962 /usr/bin/conmon --api-version 1 -c 2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db -u 2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db/userdata -p /run/containers/storage/overlay-containers/2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db

Dec 02 11:38:47 compute-0 ceph-osd[89966]: monclient: _check_auth_tickets
Dec 02 11:38:47 compute-0 ceph-osd[89966]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-12-02T11:38:16.209781+0000)
Dec 02 11:38:47 compute-0 ceph-osd[89966]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Dec 02 11:38:47 compute-0 ceph-osd[89966]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Dec 02 11:38:47 compute-0 ceph-osd[89966]: bluestore.MempoolThread(0x556dc938fb60) _resize_shards cache_size: 2845415832 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 3600166 data_alloc: 234881024 data_used: 22253568
Dec 02 11:38:47 compute-0 ceph-osd[89966]: monclient: tick
Dec 02 11:38:47 compute-0 ceph-osd[89966]: monclient: _check_auth_tickets
Dec 02 11:38:47 compute-0 ceph-osd[89966]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-12-02T11:38:17.209921+0000)
Dec 02 11:38:47 compute-0 ceph-osd[89966]: prioritycache tune_memory target: 4294967296 mapped: 199925760 unmapped: 33505280 heap: 233431040 old mem: 2845415832 new mem: 2845415832
Dec 02 11:38:47 compute-0 ceph-osd[89966]: do_command 'log dump' '{prefix=log dump}'

● ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.2.service - Ceph osd.2 for 95bc4eaa-1a14-59bf-acf2-4b3da055547d
     Loaded: loaded (/etc/systemd/system/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-02 10:50:34 UTC; 48min ago
   Main PID: 91050 (conmon)
         IO: 520.8M read, 7.8G written
      Tasks: 60 (limit: 48628)
     Memory: 903.5M (peak: 1.0G)
        CPU: 51.021s
     CGroup: /system.slice/system-ceph\x2d95bc4eaa\x2d1a14\x2d59bf\x2dacf2\x2d4b3da055547d.slice/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.2.service
             ├─libpod-payload-227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5
             │ ├─91052 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─91055 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─91050 /usr/bin/conmon --api-version 1 -c 227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5 -u 227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5/userdata -p /run/containers/storage/overlay-containers/227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5

Dec 02 11:38:42 compute-0 ceph-osd[91055]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-12-02T11:38:10.553195+0000)
Dec 02 11:38:42 compute-0 ceph-osd[91055]: prioritycache tune_memory target: 4294967296 mapped: 195059712 unmapped: 35053568 heap: 230113280 old mem: 2845415832 new mem: 2845415832
Dec 02 11:38:42 compute-0 ceph-osd[91055]: monclient: tick
Dec 02 11:38:42 compute-0 ceph-osd[91055]: monclient: _check_auth_tickets
Dec 02 11:38:42 compute-0 ceph-osd[91055]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-12-02T11:38:11.553349+0000)
Dec 02 11:38:42 compute-0 ceph-osd[91055]: prioritycache tune_memory target: 4294967296 mapped: 195076096 unmapped: 35037184 heap: 230113280 old mem: 2845415832 new mem: 2845415832
Dec 02 11:38:42 compute-0 ceph-osd[91055]: monclient: tick
Dec 02 11:38:42 compute-0 ceph-osd[91055]: monclient: _check_auth_tickets
Dec 02 11:38:42 compute-0 ceph-osd[91055]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-12-02T11:38:12.553465+0000)
Dec 02 11:38:42 compute-0 ceph-osd[91055]: do_command 'log dump' '{prefix=log dump}'

● ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@rgw.rgw.compute-0.ssuoka.service - Ceph rgw.rgw.compute-0.ssuoka for 95bc4eaa-1a14-59bf-acf2-4b3da055547d
     Loaded: loaded (/etc/systemd/system/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-02 10:51:48 UTC; 47min ago
   Main PID: 101147 (conmon)
         IO: 0B read, 522.0K written
      Tasks: 605 (limit: 48628)
     Memory: 98.9M (peak: 99.6M)
        CPU: 14.752s
     CGroup: /system.slice/system-ceph\x2d95bc4eaa\x2d1a14\x2d59bf\x2dacf2\x2d4b3da055547d.slice/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@rgw.rgw.compute-0.ssuoka.service
             ├─libpod-payload-ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b
             │ ├─101149 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.ssuoka -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─101151 /usr/bin/radosgw -n client.rgw.rgw.compute-0.ssuoka -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─101147 /usr/bin/conmon --api-version 1 -c ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b -u ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b/userdata -p /run/containers/storage/overlay-containers/ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-rgw-rgw-compute-0-ssuoka --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@rgw.rgw.compute-0.ssuoka.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b

Dec 02 10:51:49 compute-0 radosgw[101151]: framework conf key: endpoint, val: 192.168.122.100:8082
Dec 02 10:51:49 compute-0 radosgw[101151]: init_numa not setting numa affinity
Dec 02 10:51:59 compute-0 radosgw[101151]: LDAP not started since no server URIs were provided in the configuration.
Dec 02 10:51:59 compute-0 radosgw[101151]: framework: beast
Dec 02 10:51:59 compute-0 radosgw[101151]: framework conf key: ssl_certificate, val: config://rgw/cert/$realm/$zone.crt
Dec 02 10:51:59 compute-0 radosgw[101151]: framework conf key: ssl_private_key, val: config://rgw/cert/$realm/$zone.key
Dec 02 10:51:59 compute-0 ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-rgw-rgw-compute-0-ssuoka[101147]: 2025-12-02T10:51:59.987+0000 7fc90f0e0940 -1 LDAP not started since no server URIs were provided in the configuration.
Dec 02 10:52:00 compute-0 radosgw[101151]: starting handler: beast
Dec 02 10:52:00 compute-0 radosgw[101151]: set uid:gid to 167:167 (ceph:ceph)
Dec 02 10:52:00 compute-0 radosgw[101151]: mgrc service_daemon_register rgw.14271 metadata {arch=x86_64,ceph_release=reef,ceph_version=ceph version 18.2.7 (6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad) reef (stable),ceph_version_short=18.2.7,container_hostname=compute-0,container_image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0,cpu=AMD EPYC-Rome Processor,distro=centos,distro_description=CentOS Stream 9,distro_version=9,frontend_config#0=beast endpoint=192.168.122.100:8082,frontend_type#0=beast,hostname=compute-0,id=rgw.compute-0.ssuoka,kernel_description=#1 SMP PREEMPT_DYNAMIC Fri Nov 28 14:01:17 UTC 2025,kernel_version=5.14.0-645.el9.x86_64,mem_swap_kb=1048572,mem_total_kb=7864320,num_handles=1,os=Linux,pid=2,realm_id=,realm_name=,zone_id=5cea1f27-8eab-4d02-a414-5e7ca61e7dc2,zone_name=default,zonegroup_id=3b343abf-7d33-41ab-8076-898b1837a7d4,zonegroup_name=default}

● ceph-osd-losetup-0.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-0.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-12-02 10:47:52 UTC; 51min ago
   Main PID: 72511 (code=exited, status=0/SUCCESS)
        CPU: 31ms

Dec 02 10:47:52 compute-0 systemd[1]: Starting Ceph OSD losetup...
Dec 02 10:47:52 compute-0 bash[72512]: /dev/loop3: [64513]:4194940 (/var/lib/ceph-osd-0.img)
Dec 02 10:47:52 compute-0 systemd[1]: Finished Ceph OSD losetup.

● ceph-osd-losetup-1.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-1.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-12-02 10:47:57 UTC; 51min ago
   Main PID: 72881 (code=exited, status=0/SUCCESS)
        CPU: 22ms

Dec 02 10:47:57 compute-0 systemd[1]: Starting Ceph OSD losetup...
Dec 02 10:47:57 compute-0 bash[72882]: /dev/loop4: [64513]:4327923 (/var/lib/ceph-osd-1.img)
Dec 02 10:47:57 compute-0 systemd[1]: Finished Ceph OSD losetup.

● ceph-osd-losetup-2.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-2.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-12-02 10:48:02 UTC; 51min ago
   Main PID: 73251 (code=exited, status=0/SUCCESS)
        CPU: 26ms

Dec 02 10:48:02 compute-0 systemd[1]: Starting Ceph OSD losetup...
Dec 02 10:48:02 compute-0 bash[73252]: /dev/loop5: [64513]:4327932 (/var/lib/ceph-osd-2.img)
Dec 02 10:48:02 compute-0 systemd[1]: Finished Ceph OSD losetup.

● chronyd.service - NTP client/server
     Loaded: loaded (/usr/lib/systemd/system/chronyd.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-02 10:45:40 UTC; 53min ago
       Docs: man:chronyd(8)
             man:chrony.conf(5)
   Main PID: 58549 (chronyd)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48628)
     Memory: 1.0M (peak: 1.9M)
        CPU: 73ms
     CGroup: /system.slice/chronyd.service
             └─58549 /usr/sbin/chronyd -F 2

Dec 02 10:45:40 compute-0 systemd[1]: Starting NTP client/server...
Dec 02 10:45:40 compute-0 chronyd[58549]: chronyd version 4.8 starting (+CMDMON +REFCLOCK +RTC +PRIVDROP +SCFILTER +SIGND +NTS +SECHASH +IPV6 +DEBUG)
Dec 02 10:45:40 compute-0 chronyd[58549]: Frequency -26.278 +/- 0.172 ppm read from /var/lib/chrony/drift
Dec 02 10:45:40 compute-0 chronyd[58549]: Loaded seccomp filter (level 2)
Dec 02 10:45:40 compute-0 systemd[1]: Started NTP client/server.
Dec 02 10:47:51 compute-0 chronyd[58549]: Selected source 167.160.187.12 (pool.ntp.org)

● cloud-config.service - Cloud-init: Config Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-12-02 10:03:36 UTC; 1h 35min ago
   Main PID: 1002 (code=exited, status=0/SUCCESS)
        CPU: 386ms

Dec 02 10:03:36 np0005542249.novalocal systemd[1]: Starting Cloud-init: Config Stage...
Dec 02 10:03:36 np0005542249.novalocal cloud-init[1141]: Cloud-init v. 24.4-7.el9 running 'modules:config' at Tue, 02 Dec 2025 10:03:36 +0000. Up 9.47 seconds.
Dec 02 10:03:36 np0005542249.novalocal systemd[1]: Finished Cloud-init: Config Stage.

● cloud-final.service - Cloud-init: Final Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-final.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-12-02 10:03:37 UTC; 1h 35min ago
   Main PID: 1174 (code=exited, status=0/SUCCESS)
        CPU: 513ms

Dec 02 10:03:36 np0005542249.novalocal cloud-init[1241]: Cloud-init v. 24.4-7.el9 running 'modules:final' at Tue, 02 Dec 2025 10:03:36 +0000. Up 9.85 seconds.
Dec 02 10:03:37 np0005542249.novalocal cloud-init[1265]: #############################################################
Dec 02 10:03:37 np0005542249.novalocal cloud-init[1270]: -----BEGIN SSH HOST KEY FINGERPRINTS-----
Dec 02 10:03:37 np0005542249.novalocal cloud-init[1276]: 256 SHA256:wmhU5k2ybtyXd2dwxhCFS9OJIK+/yYqDCxrC0zWSyO4 root@np0005542249.novalocal (ECDSA)
Dec 02 10:03:37 np0005542249.novalocal cloud-init[1283]: 256 SHA256:hyYiJ+B5sdUEmgsRMw/xlW94xxmeo18QBtujmK7qfHo root@np0005542249.novalocal (ED25519)
Dec 02 10:03:37 np0005542249.novalocal cloud-init[1291]: 3072 SHA256:SU0Z+l8ZfpycDRm/9dAgy6ZHjpTyfKNxZaMolbBNvF8 root@np0005542249.novalocal (RSA)
Dec 02 10:03:37 np0005542249.novalocal cloud-init[1292]: -----END SSH HOST KEY FINGERPRINTS-----
Dec 02 10:03:37 np0005542249.novalocal cloud-init[1293]: #############################################################
Dec 02 10:03:37 np0005542249.novalocal cloud-init[1241]: Cloud-init v. 24.4-7.el9 finished at Tue, 02 Dec 2025 10:03:37 +0000. Datasource DataSourceConfigDrive [net,ver=2][source=/dev/sr0].  Up 10.18 seconds
Dec 02 10:03:37 np0005542249.novalocal systemd[1]: Finished Cloud-init: Final Stage.

● cloud-init-local.service - Cloud-init: Local Stage (pre-network)
     Loaded: loaded (/usr/lib/systemd/system/cloud-init-local.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-12-02 10:03:34 UTC; 1h 35min ago
   Main PID: 777 (code=exited, status=0/SUCCESS)
        CPU: 710ms

Dec 02 10:03:32 localhost systemd[1]: Starting Cloud-init: Local Stage (pre-network)...
Dec 02 10:03:33 localhost cloud-init[839]: Cloud-init v. 24.4-7.el9 running 'init-local' at Tue, 02 Dec 2025 10:03:33 +0000. Up 6.61 seconds.
Dec 02 10:03:34 np0005542249.novalocal systemd[1]: Finished Cloud-init: Local Stage (pre-network).

● cloud-init.service - Cloud-init: Network Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-12-02 10:03:36 UTC; 1h 35min ago
   Main PID: 889 (code=exited, status=0/SUCCESS)
        CPU: 1.342s

Dec 02 10:03:36 np0005542249.novalocal cloud-init[922]: |  B +.o o+       |
Dec 02 10:03:36 np0005542249.novalocal cloud-init[922]: |.. * .oo+o=      |
Dec 02 10:03:36 np0005542249.novalocal cloud-init[922]: |..o =.o+.X.      |
Dec 02 10:03:36 np0005542249.novalocal cloud-init[922]: | oo+oo+.S +      |
Dec 02 10:03:36 np0005542249.novalocal cloud-init[922]: |  .+.. + . .     |
Dec 02 10:03:36 np0005542249.novalocal cloud-init[922]: |     .  . .      |
Dec 02 10:03:36 np0005542249.novalocal cloud-init[922]: |.  E.    .       |
Dec 02 10:03:36 np0005542249.novalocal cloud-init[922]: |.==.             |
Dec 02 10:03:36 np0005542249.novalocal cloud-init[922]: +----[SHA256]-----+
Dec 02 10:03:36 np0005542249.novalocal systemd[1]: Finished Cloud-init: Network Stage.

○ cpupower.service - Configure CPU power related settings
     Loaded: loaded (/usr/lib/systemd/system/cpupower.service; disabled; preset: disabled)
     Active: inactive (dead)

● crond.service - Command Scheduler
Unit display-manager.service could not be found.
     Loaded: loaded (/usr/lib/systemd/system/crond.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-02 10:03:36 UTC; 1h 35min ago
   Main PID: 1009 (crond)
         IO: 160.0K read, 8.0K written
      Tasks: 2 (limit: 48628)
     Memory: 1.6M (peak: 4.5M)
        CPU: 150ms
     CGroup: /system.slice/crond.service
             ├─  1009 /usr/sbin/crond -n
             └─162071 /usr/sbin/anacron -s

Dec 02 11:01:01 compute-0 anacron[162071]: Will run job `cron.daily' in 17 min.
Dec 02 11:01:01 compute-0 anacron[162071]: Will run job `cron.weekly' in 37 min.
Dec 02 11:01:01 compute-0 anacron[162071]: Will run job `cron.monthly' in 57 min.
Dec 02 11:01:01 compute-0 anacron[162071]: Jobs will be executed sequentially
Dec 02 11:01:01 compute-0 run-parts[162076]: (/etc/cron.hourly) finished 0anacron
Dec 02 11:01:01 compute-0 CROND[162047]: (root) CMDEND (run-parts /etc/cron.hourly)
Dec 02 11:18:01 compute-0 anacron[162071]: Job `cron.daily' started
Dec 02 11:18:01 compute-0 anacron[162071]: Job `cron.daily' terminated
Dec 02 11:38:01 compute-0 anacron[162071]: Job `cron.weekly' started
Dec 02 11:38:01 compute-0 anacron[162071]: Job `cron.weekly' terminated

● dbus-broker.service - D-Bus System Message Bus
     Loaded: loaded (/usr/lib/systemd/system/dbus-broker.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
TriggeredBy: ● dbus.socket
       Docs: man:dbus-broker-launch(1)
   Main PID: 756 (dbus-broker-lau)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48628)
     Memory: 2.9M (peak: 3.9M)
        CPU: 8.150s
     CGroup: /system.slice/dbus-broker.service
             ├─756 /usr/bin/dbus-broker-launch --scope system --audit
             └─773 dbus-broker --log 4 --controller 9 --machine-id 1f988c78c563e12389ab342aced42dbb --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit

Dec 02 10:43:09 compute-0 dbus-broker-launch[756]: Noticed file-system modification, trigger reload.
Dec 02 10:43:53 compute-0 dbus-broker-launch[773]: avc:  op=load_policy lsm=selinux seqno=9 res=1
Dec 02 10:44:02 compute-0 dbus-broker-launch[773]: avc:  op=load_policy lsm=selinux seqno=10 res=1
Dec 02 10:59:05 compute-0 dbus-broker-launch[773]: avc:  op=load_policy lsm=selinux seqno=11 res=1
Dec 02 11:02:43 compute-0 dbus-broker-launch[773]: avc:  op=load_policy lsm=selinux seqno=12 res=1
Dec 02 11:02:44 compute-0 dbus-broker-launch[773]: avc:  op=load_policy lsm=selinux seqno=13 res=1
Dec 02 11:03:44 compute-0 dbus-broker-launch[756]: Noticed file-system modification, trigger reload.
Dec 02 11:03:44 compute-0 dbus-broker-launch[773]: avc:  op=load_policy lsm=selinux seqno=14 res=1
Dec 02 11:03:44 compute-0 dbus-broker-launch[756]: Noticed file-system modification, trigger reload.
Dec 02 11:05:13 compute-0 dbus-broker-launch[773]: avc:  op=load_policy lsm=selinux seqno=15 res=1

○ dm-event.service - Device-mapper event daemon
     Loaded: loaded (/usr/lib/systemd/system/dm-event.service; static)
     Active: inactive (dead)
TriggeredBy: ● dm-event.socket
       Docs: man:dmeventd(8)

○ dnf-makecache.service - dnf makecache
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.service; static)
     Active: inactive (dead) since Tue 2025-12-02 10:15:25 UTC; 1h 23min ago
TriggeredBy: ● dnf-makecache.timer
   Main PID: 9120 (code=exited, status=0/SUCCESS)
        CPU: 658ms

Dec 02 10:15:24 np0005542249.novalocal systemd[1]: Starting dnf makecache...
Dec 02 10:15:24 np0005542249.novalocal dnf[9120]: Failed determining last makecache time.
Dec 02 10:15:25 np0005542249.novalocal dnf[9120]: CentOS Stream 9 - BaseOS                         56 kB/s | 5.8 kB     00:00
Dec 02 10:15:25 np0005542249.novalocal dnf[9120]: CentOS Stream 9 - AppStream                      51 kB/s | 5.8 kB     00:00
Dec 02 10:15:25 np0005542249.novalocal dnf[9120]: CentOS Stream 9 - CRB                            61 kB/s | 5.7 kB     00:00
Dec 02 10:15:25 np0005542249.novalocal dnf[9120]: CentOS Stream 9 - Extras packages                80 kB/s | 8.1 kB     00:00
Dec 02 10:15:25 np0005542249.novalocal dnf[9120]: Metadata cache created.
Dec 02 10:15:25 np0005542249.novalocal systemd[1]: dnf-makecache.service: Deactivated successfully.
Dec 02 10:15:25 np0005542249.novalocal systemd[1]: Finished dnf makecache.

○ dracut-cmdline.service - dracut cmdline hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-cmdline.service; static)
     Active: inactive (dead) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
   Duration: 1.826s
       Docs: man:dracut-cmdline.service(8)
             man:dracut.bootup(7)
   Main PID: 326 (code=exited, status=0/SUCCESS)
        CPU: 130ms

Dec 02 10:03:29 localhost systemd[1]: Starting dracut cmdline hook...
Dec 02 10:03:29 localhost dracut-cmdline[326]: dracut-9 dracut-057-102.git20250818.el9
Dec 02 10:03:29 localhost dracut-cmdline[326]: Using kernel command line parameters:    BOOT_IMAGE=(hd0,msdos1)/boot/vmlinuz-5.14.0-645.el9.x86_64 root=UUID=b277050f-8ace-464d-abb6-4c46d4c45253 ro console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=1G-2G:192M,2G-64G:256M,64G-:512M
Dec 02 10:03:29 localhost systemd[1]: Finished dracut cmdline hook.
Dec 02 10:03:31 localhost systemd[1]: dracut-cmdline.service: Deactivated successfully.
Dec 02 10:03:31 localhost systemd[1]: Stopped dracut cmdline hook.

○ dracut-initqueue.service - dracut initqueue hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-initqueue.service; static)
     Active: inactive (dead) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
   Duration: 838ms
       Docs: man:dracut-initqueue.service(8)
             man:dracut.bootup(7)
   Main PID: 489 (code=exited, status=0/SUCCESS)
        CPU: 41ms

Dec 02 10:03:29 localhost systemd[1]: Starting dracut initqueue hook...
Dec 02 10:03:30 localhost systemd[1]: Finished dracut initqueue hook.
Dec 02 10:03:31 localhost systemd[1]: dracut-initqueue.service: Deactivated successfully.
Dec 02 10:03:31 localhost systemd[1]: Stopped dracut initqueue hook.

○ dracut-mount.service - dracut mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-mount.service; static)
     Active: inactive (dead) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
   Duration: 149ms
       Docs: man:dracut-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 571 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Dec 02 10:03:31 localhost systemd[1]: Starting dracut mount hook...
Dec 02 10:03:31 localhost systemd[1]: Finished dracut mount hook.
Dec 02 10:03:31 localhost systemd[1]: dracut-mount.service: Deactivated successfully.
Dec 02 10:03:31 localhost systemd[1]: Stopped dracut mount hook.

○ dracut-pre-mount.service - dracut pre-mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-mount.service; static)
     Active: inactive (dead) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
   Duration: 804ms
       Docs: man:dracut-pre-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 548 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Dec 02 10:03:30 localhost systemd[1]: Starting dracut pre-mount hook...
Dec 02 10:03:30 localhost systemd[1]: Finished dracut pre-mount hook.
Dec 02 10:03:31 localhost systemd[1]: dracut-pre-mount.service: Deactivated successfully.
Dec 02 10:03:31 localhost systemd[1]: Stopped dracut pre-mount hook.

○ dracut-pre-pivot.service - dracut pre-pivot and cleanup hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-pivot.service; static)
     Active: inactive (dead) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
   Duration: 28ms
       Docs: man:dracut-pre-pivot.service(8)
             man:dracut.bootup(7)
   Main PID: 576 (code=exited, status=0/SUCCESS)
        CPU: 89ms

Dec 02 10:03:31 localhost systemd[1]: Starting dracut pre-pivot and cleanup hook...
Dec 02 10:03:31 localhost systemd[1]: Finished dracut pre-pivot and cleanup hook.
Dec 02 10:03:31 localhost systemd[1]: dracut-pre-pivot.service: Deactivated successfully.
Dec 02 10:03:31 localhost systemd[1]: Stopped dracut pre-pivot and cleanup hook.

○ dracut-pre-trigger.service - dracut pre-trigger hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-trigger.service; static)
     Active: inactive (dead) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
   Duration: 1.404s
       Docs: man:dracut-pre-trigger.service(8)
             man:dracut.bootup(7)
   Main PID: 465 (code=exited, status=0/SUCCESS)
        CPU: 19ms

Dec 02 10:03:29 localhost systemd[1]: Starting dracut pre-trigger hook...
Dec 02 10:03:29 localhost systemd[1]: Finished dracut pre-trigger hook.
Dec 02 10:03:31 localhost systemd[1]: dracut-pre-trigger.service: Deactivated successfully.
Dec 02 10:03:31 localhost systemd[1]: Stopped dracut pre-trigger hook.

○ dracut-pre-udev.service - dracut pre-udev hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-udev.service; static)
     Active: inactive (dead) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
   Duration: 1.534s
       Docs: man:dracut-pre-udev.service(8)
             man:dracut.bootup(7)
   Main PID: 415 (code=exited, status=0/SUCCESS)
        CPU: 291ms

Dec 02 10:03:29 localhost systemd[1]: Starting dracut pre-udev hook...
Dec 02 10:03:29 localhost rpc.statd[442]: Version 2.5.4 starting
Dec 02 10:03:29 localhost rpc.statd[442]: Initializing NSM state
Dec 02 10:03:29 localhost rpc.idmapd[447]: Setting log level to 0
Dec 02 10:03:29 localhost systemd[1]: Finished dracut pre-udev hook.
Dec 02 10:03:31 localhost rpc.idmapd[447]: exiting on signal 15
Dec 02 10:03:31 localhost systemd[1]: dracut-pre-udev.service: Deactivated successfully.
Dec 02 10:03:31 localhost systemd[1]: Stopped dracut pre-udev hook.

○ dracut-shutdown-onfailure.service - Service executing upon dracut-shutdown failure to perform cleanup
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown-onfailure.service; static)
     Active: inactive (dead)
       Docs: man:dracut-shutdown.service(8)

● dracut-shutdown.service - Restore /run/initramfs on shutdown
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown.service; static)
     Active: active (exited) since Tue 2025-12-02 10:03:33 UTC; 1h 35min ago
       Docs: man:dracut-shutdown.service(8)
   Main PID: 778 (code=exited, status=0/SUCCESS)
        CPU: 2ms

Dec 02 10:03:32 localhost systemd[1]: Starting Restore /run/initramfs on shutdown...
Dec 02 10:03:33 localhost systemd[1]: Finished Restore /run/initramfs on shutdown.

● edpm-container-shutdown.service - EDPM Container Shutdown
     Loaded: loaded (/etc/systemd/system/edpm-container-shutdown.service; enabled; preset: enabled)
     Active: active (exited) since Tue 2025-12-02 10:46:09 UTC; 53min ago
       Docs: https://github.com/openstack-k8s-operators/docs
   Main PID: 61543 (code=exited, status=0/SUCCESS)
        CPU: 4ms

Dec 02 10:46:09 compute-0 systemd[1]: Starting EDPM Container Shutdown...
Dec 02 10:46:09 compute-0 systemd[1]: Finished EDPM Container Shutdown.

○ edpm_libvirt_guests.service - Suspend libvirt Guests in edpm
     Loaded: loaded (/etc/systemd/system/edpm_libvirt_guests.service; enabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:libvirtd(8)
             https://libvirt.org

● edpm_multipathd.service - multipathd container
     Loaded: loaded (/etc/systemd/system/edpm_multipathd.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-02 11:07:36 UTC; 31min ago
   Main PID: 236122 (conmon)
         IO: 0B read, 111.5K written
      Tasks: 1 (limit: 48628)
     Memory: 668.0K (peak: 17.6M)
        CPU: 141ms
     CGroup: /system.slice/edpm_multipathd.service
             └─236122 /usr/bin/conmon --api-version 1 -c 130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193 -u 130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193/userdata -p /run/containers/storage/overlay-containers/130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193/userdata/pidfile -n multipathd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193/userdata/oci-log --conmon-pidfile /run/multipathd.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only 
--exit-command-arg 130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193

Dec 02 11:07:36 compute-0 multipathd[236122]: + sudo kolla_copy_cacerts
Dec 02 11:07:36 compute-0 multipathd[236122]: + [[ ! -n '' ]]
Dec 02 11:07:36 compute-0 multipathd[236122]: + . kolla_extend_start
Dec 02 11:07:36 compute-0 multipathd[236122]: + echo 'Running command: '\''/usr/sbin/multipathd -d'\'''
Dec 02 11:07:36 compute-0 multipathd[236122]: Running command: '/usr/sbin/multipathd -d'
Dec 02 11:07:36 compute-0 multipathd[236122]: + umask 0022
Dec 02 11:07:36 compute-0 multipathd[236122]: + exec /usr/sbin/multipathd -d
Dec 02 11:07:36 compute-0 multipathd[236122]: 3849.709578 | --------start up--------
Dec 02 11:07:36 compute-0 multipathd[236122]: 3849.709601 | read /etc/multipath.conf
Dec 02 11:07:36 compute-0 multipathd[236122]: 3849.717984 | path checkers start up

● edpm_nova_compute.service - nova_compute container
     Loaded: loaded (/etc/systemd/system/edpm_nova_compute.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-02 11:09:26 UTC; 29min ago
    Process: 254885 ExecStart=/usr/bin/podman start nova_compute (code=exited, status=0/SUCCESS)
   Main PID: 254900 (conmon)
         IO: 0B read, 96.0K written
      Tasks: 1 (limit: 48628)
     Memory: 676.0K (peak: 17.3M)
        CPU: 1.435s
     CGroup: /system.slice/edpm_nova_compute.service
             └─254900 /usr/bin/conmon --api-version 1 -c 5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad -u 5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad/userdata -p /run/containers/storage/overlay-containers/5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad

Dec 02 11:39:00 compute-0 nova_compute[254900]: 2025-12-02 11:39:00.693 254904 DEBUG nova.compute.resource_tracker [None req-ace0d83d-922f-4e7c-880c-577b02d195a4 - - - - - -] Compute_service record updated for compute-0.ctlplane.example.com:compute-0.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995[00m
Dec 02 11:39:00 compute-0 nova_compute[254900]: 2025-12-02 11:39:00.693 254904 DEBUG oslo_concurrency.lockutils [None req-ace0d83d-922f-4e7c-880c-577b02d195a4 - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.611s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Dec 02 11:39:03 compute-0 nova_compute[254900]: 2025-12-02 11:39:03.692 254904 DEBUG oslo_service.periodic_task [None req-ace0d83d-922f-4e7c-880c-577b02d195a4 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Dec 02 11:39:03 compute-0 nova_compute[254900]: 2025-12-02 11:39:03.693 254904 DEBUG oslo_service.periodic_task [None req-ace0d83d-922f-4e7c-880c-577b02d195a4 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Dec 02 11:39:03 compute-0 nova_compute[254900]: 2025-12-02 11:39:03.694 254904 DEBUG oslo_service.periodic_task [None req-ace0d83d-922f-4e7c-880c-577b02d195a4 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Dec 02 11:39:03 compute-0 nova_compute[254900]: 2025-12-02 11:39:03.694 254904 DEBUG oslo_service.periodic_task [None req-ace0d83d-922f-4e7c-880c-577b02d195a4 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Dec 02 11:39:04 compute-0 nova_compute[254900]: 2025-12-02 11:39:04.123 254904 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Dec 02 11:39:05 compute-0 nova_compute[254900]: 2025-12-02 11:39:05.316 254904 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Dec 02 11:39:09 compute-0 nova_compute[254900]: 2025-12-02 11:39:09.166 254904 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Dec 02 11:39:10 compute-0 nova_compute[254900]: 2025-12-02 11:39:10.364 254904 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m

● edpm_ovn_controller.service - ovn_controller container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_controller.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-02 11:00:09 UTC; 39min ago
   Main PID: 153849 (conmon)
         IO: 0B read, 145.0K written
      Tasks: 1 (limit: 48628)
     Memory: 688.0K (peak: 17.6M)
        CPU: 411ms
     CGroup: /system.slice/edpm_ovn_controller.service
             └─153849 /usr/bin/conmon --api-version 1 -c 5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998 -u 5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998/userdata -p /run/containers/storage/overlay-containers/5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998

Dec 02 11:36:15 compute-0 ovn_controller[153849]: 2025-12-02T11:36:15Z|00075|pinctrl(ovn_pinctrl0)|INFO|DHCPNAK fa:16:3e:52:3c:ac 10.100.0.3
Dec 02 11:36:20 compute-0 ovn_controller[153849]: 2025-12-02T11:36:20Z|00076|pinctrl(ovn_pinctrl0)|WARN|DHCPREQUEST requested IP 10.100.0.14 does not match offer 10.100.0.3
Dec 02 11:36:20 compute-0 ovn_controller[153849]: 2025-12-02T11:36:20Z|00077|pinctrl(ovn_pinctrl0)|INFO|DHCPNAK fa:16:3e:52:3c:ac 10.100.0.3
Dec 02 11:36:20 compute-0 ovn_controller[153849]: 2025-12-02T11:36:20Z|00078|pinctrl(ovn_pinctrl0)|INFO|DHCPOFFER fa:16:3e:52:3c:ac 10.100.0.3
Dec 02 11:36:20 compute-0 ovn_controller[153849]: 2025-12-02T11:36:20Z|00079|pinctrl(ovn_pinctrl0)|INFO|DHCPACK fa:16:3e:52:3c:ac 10.100.0.3
Dec 02 11:36:35 compute-0 ovn_controller[153849]: 2025-12-02T11:36:35Z|00284|memory_trim|INFO|Detected inactivity (last active 30001 ms ago): trimming memory
Dec 02 11:36:38 compute-0 ovn_controller[153849]: 2025-12-02T11:36:38Z|00285|binding|INFO|Releasing lport ac73987a-aa98-40fc-a185-3eb1a23885de from this chassis (sb_readonly=0)
Dec 02 11:36:38 compute-0 ovn_controller[153849]: 2025-12-02T11:36:38Z|00286|binding|INFO|Setting lport ac73987a-aa98-40fc-a185-3eb1a23885de down in Southbound
Dec 02 11:36:38 compute-0 ovn_controller[153849]: 2025-12-02T11:36:38Z|00287|binding|INFO|Removing iface tapac73987a-aa ovn-installed in OVS
Dec 02 11:37:30 compute-0 ovn_controller[153849]: 2025-12-02T11:37:30Z|00288|memory_trim|INFO|Detected inactivity (last active 30003 ms ago): trimming memory

● edpm_ovn_metadata_agent.service - ovn_metadata_agent container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_metadata_agent.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-02 11:01:18 UTC; 37min ago
   Main PID: 163733 (conmon)
         IO: 0B read, 111.0K written
      Tasks: 1 (limit: 48628)
     Memory: 712.0K (peak: 19.1M)
        CPU: 541ms
     CGroup: /system.slice/edpm_ovn_metadata_agent.service
             └─163733 /usr/bin/conmon --api-version 1 -c 301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193 -u 301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193/userdata -p /run/containers/storage/overlay-containers/301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193

Dec 02 11:36:39 compute-0 ovn_metadata_agent[163733]: 2025-12-02 11:36:39.213 262398 DEBUG oslo.privsep.daemon [-] privsep: reply[c135dfd8-fa05-45c6-ae6c-49b2c726d32c]: (4, True) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501[00m
Dec 02 11:36:39 compute-0 ovn_metadata_agent[163733]: 2025-12-02 11:36:39.229 262398 DEBUG oslo.privsep.daemon [-] privsep: reply[c1b4b6d3-ef1f-467c-8a73-e3e271cc1724]: (4, [{'family': 0, '__align': (), 'ifi_type': 772, 'index': 1, 'flags': 65609, 'change': 0, 'attrs': [['IFLA_IFNAME', 'lo'], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UNKNOWN'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', 65536], ['IFLA_MIN_MTU', 0], ['IFLA_MAX_MTU', 0], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['UNKNOWN', {'header': {'length': 8, 'type': 61}}], ['IFLA_NUM_TX_QUEUES', 1], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['UNKNOWN', {'header': {'length': 8, 'type': 63}}], ['UNKNOWN', {'header': {'length': 8, 'type': 64}}], ['IFLA_TSO_MAX_SIZE', 524280], ['IFLA_TSO_MAX_SEGS', 65535], ['UNKNOWN', {'header': {'length': 8, 'type': 66}}], ['IFLA_NUM_RX_QUEUES', 1], ['IFLA_CARRIER', 1], ['IFLA_CARRIER_CHANGES', 0], ['IFLA_CARRIER_UP_COUNT', 0], ['IFLA_CARRIER_DOWN_COUNT', 0], ['IFLA_PROTO_DOWN', 0], ['IFLA_ADDRESS', '00:00:00:00:00:00'], ['IFLA_BROADCAST', '00:00:00:00:00:00'], ['IFLA_STATS64', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 28, 'tx_bytes': 28, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_STATS', {'rx_packets': 1, 'tx_packets': 1, 'rx_bytes': 28, 'tx_bytes': 28, 'rx_errors': 0, 'tx_errors': 0, 'rx_dropped': 0, 'tx_dropped': 0, 'multicast': 0, 'collisions': 0, 'rx_length_errors': 0, 'rx_over_errors': 0, 'rx_crc_errors': 0, 'rx_frame_errors': 0, 'rx_fifo_errors': 0, 'rx_missed_errors': 0, 'tx_aborted_errors': 0, 'tx_carrier_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 
'tx_window_errors': 0, 'rx_compressed': 0, 'tx_compressed': 0}], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ['IFLA_QDISC', 'noqueue'], ['IFLA_AF_SPEC', {'attrs': [['AF_INET', {'dummy': 65668, 'forwarding': 1, 'mc_forwarding': 0, 'proxy_arp': 0, 'accept_redirects': 0, 'secure_redirects': 0, 'send_redirects': 0, 'shared_media': 1, 'rp_filter': 1, 'accept_source_route': 0, 'bootp_relay': 0, 'log_martians': 0, 'tag': 0, 'arpfilter': 0, 'medium_id': 0, 'noxfrm': 1, 'nopolicy': 1, 'force_igmp_version': 0, 'arp_announce': 0, 'arp_ignore': 0, 'promote_secondaries': 1, 'arp_accept': 0, 'arp_notify': 0, 'accept_local': 0, 'src_vmark': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'igmpv2_unsolicited_report_interval': 10000, 'igmpv3_unsolicited_report_interval': 1000}], ['AF_INET6', {'attrs': [['IFLA_INET6_FLAGS', 2147483648], ['IFLA_INET6_CACHEINFO', {'max_reasm_len': 65535, 'tstamp': 555227, 'reachable_time': 24433, 'retrans_time': 1000}], ['IFLA_INET6_CONF', {'forwarding': 0, 'hop_limit': 64, 'mtu': 65536, 'accept_ra': 1, 'accept_redirects': 1, 'autoconf': 1, 'dad_transmits': 1, 'router_solicitations': 4294967295, 'router_solicitation_interval': 4000, 'router_solicitation_delay': 1000, 'use_tempaddr': 4294967295, 'temp_valid_lft': 604800, 'temp_preferred_lft': 86400, 'regen_max_retry': 3, 'max_desync_factor': 600, 'max_addresses': 16, 'force_mld_version': 0, 'accept_ra_defrtr': 1, 'accept_ra_pinfo': 1, 'accept_ra_rtr_pref': 1, 'router_probe_interval': 60000, 'accept_ra_rt_info_max_plen': 0, 'proxy_ndp': 0, 'optimistic_dad': 0, 'accept_source_route': 0, 'mc_forwarding': 0, 'disable_ipv6': 0, 'accept_dad': 4294967295, 'force_tllao': 0, 'ndisc_notify': 0}], ['IFLA_INET6_STATS', {'num': 38, 'inpkts': 0, 'inoctets': 0, 'indelivers': 0, 'outforwdatagrams': 0, 'outpkts': 0, 'outoctets': 0, 'inhdrerrors': 0, 'intoobigerrors': 0, 'innoroutes': 0, 'inaddrerrors': 0, 'inunknownprotos': 0, 'intruncatedpkts': 0, 'indiscards': 0, 'outdiscards': 0, 'outnoroutes': 0, 
'reasmtimeout': 0, 'reasmreqds': 0, 'reasmoks': 0, 'reasmfails': 0, 'fragoks': 0, 'fragfails': 0, 'fragcreates': 0, 'inmcastpkts': 0, 'outmcastpkts': 0, 'inbcastpkts': 0, 'outbcastpkts': 0, 'inmcastoctets': 0, 'outmcastoctets': 0, 'inbcastoctets': 0, 'outbcastoctets': 0, 'csumerrors': 0, 'noectpkts': 0, 'ect1pkts': 0, 'ect0pkts': 0, 'cepkts': 0}], ['IFLA_INET6_ICMP6STATS', {'num': 7, 'inmsgs': 0, 'inerrors': 0, 'outmsgs': 0, 'outerrors': 0, 'csumerrors': 0}], ['IFLA_INET6_TOKEN', '::'], ['IFLA_INET6_ADDR_GEN_MODE', 0]]}]]}], ['IFLA_MAP', {'mem_start': 0, 'mem_end': 0, 'base_addr': 0, 'irq': 0, 'dma': 0, 'port': 0}], ['UNKNOWN', {'header': {'length': 4, 'type': 32830}}], ['UNKNOWN', {'header': {'length': 4, 'type': 32833}}]], 'header': {'length': 1404, 'type': 16, 'flags': 2, 'sequence_number': 255, 'pid': 299020, 'error': None, 'target': 'ovnmeta-28b69a92-5b45-421b-9985-afeebc6820aa', 'stats': (0, 0, 0)}, 'state': 'up', 'event': 'RTM_NEWLINK'}]) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501[00m
Dec 02 11:36:39 compute-0 ovn_metadata_agent[163733]: 2025-12-02 11:36:39.234 164036 DEBUG neutron.privileged.agent.linux.ip_lib [-] Namespace ovnmeta-28b69a92-5b45-421b-9985-afeebc6820aa deleted. remove_netns /usr/lib/python3.9/site-packages/neutron/privileged/agent/linux/ip_lib.py:607[00m
Dec 02 11:36:39 compute-0 ovn_metadata_agent[163733]: 2025-12-02 11:36:39.234 164036 DEBUG oslo.privsep.daemon [-] privsep: reply[7970aee9-e5aa-41aa-bd77-81e8dd22c0ca]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501[00m
Dec 02 11:37:19 compute-0 ovn_metadata_agent[163733]: 2025-12-02 11:37:19.852 163757 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Unit hv_kvp_daemon.service could not be found.
Dec 02 11:37:19 compute-0 ovn_metadata_agent[163733]: 2025-12-02 11:37:19.853 163757 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Dec 02 11:37:19 compute-0 ovn_metadata_agent[163733]: 2025-12-02 11:37:19.853 163757 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Dec 02 11:38:19 compute-0 ovn_metadata_agent[163733]: 2025-12-02 11:38:19.854 163757 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Dec 02 11:38:19 compute-0 ovn_metadata_agent[163733]: 2025-12-02 11:38:19.854 163757 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Dec 02 11:38:19 compute-0 ovn_metadata_agent[163733]: 2025-12-02 11:38:19.854 163757 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m

○ emergency.service - Emergency Shell
     Loaded: loaded (/usr/lib/systemd/system/emergency.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

● getty@tty1.service - Getty on tty1
     Loaded: loaded (/usr/lib/systemd/system/getty@.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-02 10:03:36 UTC; 1h 35min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1010 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48628)
     Memory: 220.0K (peak: 440.0K)
        CPU: 9ms
     CGroup: /system.slice/system-getty.slice/getty@tty1.service
             └─1010 /sbin/agetty -o "-p -- \\u" --noclear - linux

Dec 02 10:03:36 np0005542249.novalocal systemd[1]: Started Getty on tty1.

● gssproxy.service - GSSAPI Proxy Daemon
     Loaded: loaded (/usr/lib/systemd/system/gssproxy.service; disabled; preset: disabled)
     Active: active (running) since Tue 2025-12-02 10:03:34 UTC; 1h 35min ago
   Main PID: 870 (gssproxy)
         IO: 0B read, 0B written
      Tasks: 6 (limit: 48628)
     Memory: 1.8M (peak: 2.8M)
        CPU: 17ms
     CGroup: /system.slice/gssproxy.service
             └─870 /usr/sbin/gssproxy -D

Dec 02 10:03:34 np0005542249.novalocal systemd[1]: Starting GSSAPI Proxy Daemon...
Dec 02 10:03:34 np0005542249.novalocal systemd[1]: Started GSSAPI Proxy Daemon.

○ import-state.service - Import network configuration from initramfs
     Loaded: loaded (/usr/lib/systemd/system/import-state.service; enabled; preset: enabled)
     Active: inactive (dead)

○ initrd-cleanup.service - Cleaning Up and Shutting Down Daemons
     Loaded: loaded (/usr/lib/systemd/system/initrd-cleanup.service; static)
     Active: inactive (dead) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
   Main PID: 619 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Dec 02 10:03:31 localhost systemd[1]: Starting Cleaning Up and Shutting Down Daemons...
Dec 02 10:03:31 localhost systemd[1]: initrd-cleanup.service: Deactivated successfully.
Dec 02 10:03:31 localhost systemd[1]: Finished Cleaning Up and Shutting Down Daemons.

○ initrd-parse-etc.service - Mountpoints Configured in the Real Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-parse-etc.service; static)
     Active: inactive (dead) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
   Main PID: 569 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Dec 02 10:03:31 localhost systemd[1]: Starting Mountpoints Configured in the Real Root...
Dec 02 10:03:31 localhost systemd[1]: initrd-parse-etc.service: Deactivated successfully.
Dec 02 10:03:31 localhost systemd[1]: Finished Mountpoints Configured in the Real Root.

○ initrd-switch-root.service - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.service; static)
     Active: inactive (dead) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
   Main PID: 623 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Dec 02 10:03:31 localhost systemd[1]: Starting Switch Root...

○ initrd-udevadm-cleanup-db.service - Cleanup udev Database
     Loaded: loaded (/usr/lib/systemd/system/initrd-udevadm-cleanup-db.service; static)
     Active: inactive (dead) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
   Main PID: 621 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Dec 02 10:03:31 localhost systemd[1]: Starting Cleanup udev Database...
Dec 02 10:03:31 localhost systemd[1]: initrd-udevadm-cleanup-db.service: Deactivated successfully.
Dec 02 10:03:31 localhost systemd[1]: Finished Cleanup udev Database.

○ ip6tables.service - IPv6 firewall with ip6tables
     Loaded: loaded (/usr/lib/systemd/system/ip6tables.service; disabled; preset: disabled)
     Active: inactive (dead)

○ iptables.service - IPv4 firewall with iptables
     Loaded: loaded (/usr/lib/systemd/system/iptables.service; disabled; preset: disabled)
     Active: inactive (dead) since Tue 2025-12-02 10:46:19 UTC; 52min ago
   Duration: 42min 45.515s
   Main PID: 780 (code=exited, status=0/SUCCESS)
        CPU: 128ms

Dec 02 10:03:32 localhost systemd[1]: Starting IPv4 firewall with iptables...
Dec 02 10:03:33 localhost iptables.init[780]: iptables: Applying firewall rules: [  OK  ]
Dec 02 10:03:33 localhost systemd[1]: Finished IPv4 firewall with iptables.
Dec 02 10:46:18 compute-0 systemd[1]: Stopping IPv4 firewall with iptables...
Dec 02 10:46:19 compute-0 iptables.init[62792]: iptables: Setting chains to policy ACCEPT: raw mangle filter nat [  OK  ]
Dec 02 10:46:19 compute-0 iptables.init[62792]: iptables: Flushing firewall rules: [  OK  ]
Dec 02 10:46:19 compute-0 systemd[1]: iptables.service: Deactivated successfully.
Dec 02 10:46:19 compute-0 systemd[1]: Stopped IPv4 firewall with iptables.

● irqbalance.service - irqbalance daemon
     Loaded: loaded (/usr/lib/systemd/system/irqbalance.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:irqbalance(1)
             https://github.com/Irqbalance/irqbalance
   Main PID: 782 (irqbalance)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48628)
     Memory: 1.1M (peak: 1.5M)
        CPU: 584ms
     CGroup: /system.slice/irqbalance.service
             └─782 /usr/sbin/irqbalance

Dec 02 10:03:44 np0005542249.novalocal irqbalance[782]: Cannot change IRQ 26 affinity: Operation not permitted
Dec 02 10:03:44 np0005542249.novalocal irqbalance[782]: IRQ 26 affinity is now unmanaged
Dec 02 10:03:44 np0005542249.novalocal irqbalance[782]: Cannot change IRQ 29 affinity: Operation not permitted
Dec 02 10:03:44 np0005542249.novalocal irqbalance[782]: IRQ 29 affinity is now unmanaged
Dec 02 10:04:44 np0005542249.novalocal irqbalance[782]: Cannot change IRQ 28 affinity: Operation not permitted
Dec 02 10:04:44 np0005542249.novalocal irqbalance[782]: IRQ 28 affinity is now unmanaged
Dec 02 10:16:04 compute-0 irqbalance[782]: Cannot change IRQ 27 affinity: Operation not permitted
Dec 02 10:16:04 compute-0 irqbalance[782]: IRQ 27 affinity is now unmanaged
Dec 02 10:48:06 compute-0 irqbalance[782]: Cannot change IRQ 33 affinity: Operation not permitted
Dec 02 10:48:06 compute-0 irqbalance[782]: IRQ 33 affinity is now unmanaged

○ iscsi-init.service - One time configuration for iscsi.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-init.service; disabled; preset: disabled)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-02 11:07:52 UTC; 31min ago

Dec 02 11:06:36 compute-0 systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).
Dec 02 11:07:52 compute-0 systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).

○ iscsi-onboot.service - Special handling of early boot iSCSI sessions
     Loaded: loaded (/usr/lib/systemd/system/iscsi-onboot.service; enabled; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsi-shutdown.service - Logout off all iSCSI sessions on shutdown
     Loaded: loaded (/usr/lib/systemd/system/iscsi-shutdown.service; static)
     Active: active (exited) since Tue 2025-12-02 11:06:36 UTC; 32min ago
       Docs: man:iscsid(8)
             man:iscsiadm(8)
   Main PID: 226261 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Dec 02 11:06:36 compute-0 systemd[1]: Starting Logout off all iSCSI sessions on shutdown...
Dec 02 11:06:36 compute-0 systemd[1]: Finished Logout off all iSCSI sessions on shutdown.

○ iscsi-starter.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-starter.service; enabled; preset: disabled)
     Active: inactive (dead)

○ iscsi.service - Login and scanning of iSCSI devices
     Loaded: loaded (/usr/lib/systemd/system/iscsi.service; indirect; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsid.service - Open-iSCSI
     Loaded: loaded (/usr/lib/systemd/system/iscsid.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-02 11:07:52 UTC; 31min ago
TriggeredBy: ● iscsid.socket
       Docs: man:iscsid(8)
             man:iscsiuio(8)
             man:iscsiadm(8)
   Main PID: 238852 (iscsid)
     Status: "Ready to process requests"
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48628)
     Memory: 1.9M (peak: 2.0M)
        CPU: 15ms
     CGroup: /system.slice/iscsid.service
             └─238852 /usr/sbin/iscsid -f

Dec 02 11:07:52 compute-0 systemd[1]: Starting Open-iSCSI...
Dec 02 11:07:52 compute-0 systemd[1]: Started Open-iSCSI.

○ iscsiuio.service - iSCSI UserSpace I/O driver
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ iscsiuio.socket
       Docs: man:iscsiuio(8)

● kdump.service - Crash recovery kernel arming
     Loaded: loaded (/usr/lib/systemd/system/kdump.service; enabled; preset: enabled)
     Active: active (exited) since Tue 2025-12-02 10:03:46 UTC; 1h 35min ago
   Main PID: 1008 (code=exited, status=0/SUCCESS)
        CPU: 16.492s

Dec 02 10:03:45 np0005542249.novalocal dracut[1302]: Linked:         0 files
Dec 02 10:03:45 np0005542249.novalocal dracut[1302]: Compared:       0 xattrs
Dec 02 10:03:45 np0005542249.novalocal dracut[1302]: Compared:       0 files
Dec 02 10:03:45 np0005542249.novalocal dracut[1302]: Saved:          0 B
Dec 02 10:03:45 np0005542249.novalocal dracut[1302]: Duration:       0.000428 seconds
Dec 02 10:03:45 np0005542249.novalocal dracut[1302]: *** Hardlinking files done ***
Dec 02 10:03:46 np0005542249.novalocal dracut[1302]: *** Creating initramfs image file '/boot/initramfs-5.14.0-645.el9.x86_64kdump.img' done ***
Dec 02 10:03:46 np0005542249.novalocal kdumpctl[1017]: kdump: kexec: loaded kdump kernel
Dec 02 10:03:46 np0005542249.novalocal kdumpctl[1017]: kdump: Starting kdump: [OK]
Dec 02 10:03:46 np0005542249.novalocal systemd[1]: Finished Crash recovery kernel arming.

● kmod-static-nodes.service - Create List of Static Device Nodes
     Loaded: loaded (/usr/lib/systemd/system/kmod-static-nodes.service; static)
     Active: active (exited) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
   Main PID: 673 (code=exited, status=0/SUCCESS)
        CPU: 5ms

● ldconfig.service - Rebuild Dynamic Linker Cache
     Loaded: loaded (/usr/lib/systemd/system/ldconfig.service; static)
     Active: active (exited) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:ldconfig(8)
   Main PID: 695 (code=exited, status=0/SUCCESS)
        CPU: 57ms

Dec 02 10:03:32 localhost systemd[1]: Starting Rebuild Dynamic Linker Cache...
Dec 02 10:03:32 localhost systemd[1]: Finished Rebuild Dynamic Linker Cache.

○ libvirtd.service
     Loaded: masked (Reason: Unit libvirtd.service is masked.)
     Active: inactive (dead)
TriggeredBy: ○ libvirtd-admin.socket
             ○ libvirtd-ro.socket
             ○ libvirtd.socket

○ loadmodules.service - Load legacy module configuration
     Loaded: loaded (/usr/lib/systemd/system/loadmodules.service; enabled; preset: enabled)
     Active: inactive (dead)

○ logrotate.service - Rotate log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.service; static)
     Active: inactive (dead)
TriggeredBy: ● logrotate.timer
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

○ lvm2-lvmpolld.service - LVM2 poll daemon
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.service; static)
     Active: inactive (dead)
TriggeredBy: ● lvm2-lvmpolld.socket
       Docs: man:lvmpolld(8)

● lvm2-monitor.service - Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling
     Loaded: loaded (/usr/lib/systemd/system/lvm2-monitor.service; enabled; preset: enabled)
     Active: active (exited) since Tue 2025-12-02 10:41:15 UTC; 57min ago
       Docs: man:dmeventd(8)
             man:lvcreate(8)
             man:lvchange(8)
             man:vgchange(8)
   Main PID: 34029 (code=exited, status=0/SUCCESS)
        CPU: 42ms

Dec 02 10:41:15 compute-0 systemd[1]: Starting Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling...
Dec 02 10:41:15 compute-0 systemd[1]: Finished Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling.

○ mdmonitor.service - Software RAID monitoring and management
     Loaded: loaded (/usr/lib/systemd/system/mdmonitor.service; enabled; preset: enabled)
     Active: inactive (dead)

○ microcode.service - Load CPU microcode update
     Loaded: loaded (/usr/lib/systemd/system/microcode.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-02 10:03:32 UTC; 1h 35min ago

Dec 02 10:03:32 localhost systemd[1]: Load CPU microcode update was skipped because of an unmet condition check (ConditionPathExists=/sys/devices/system/cpu/microcode/reload).

○ modprobe@configfs.service - Load Kernel Module configfs
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:modprobe(8)
   Main PID: 769 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Dec 02 10:03:32 localhost systemd[1]: Starting Load Kernel Module configfs...
Dec 02 10:03:32 localhost systemd[1]: modprobe@configfs.service: Deactivated successfully.
Dec 02 10:03:32 localhost systemd[1]: Finished Load Kernel Module configfs.

○ modprobe@drm.service - Load Kernel Module drm
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:modprobe(8)
   Main PID: 675 (code=exited, status=0/SUCCESS)
        CPU: 102ms

Dec 02 10:03:32 localhost systemd[1]: modprobe@drm.service: Deactivated successfully.
Dec 02 10:03:32 localhost systemd[1]: Finished Load Kernel Module drm.

○ modprobe@efi_pstore.service - Load Kernel Module efi_pstore
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:modprobe(8)
   Main PID: 676 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Dec 02 10:03:32 localhost systemd[1]: modprobe@efi_pstore.service: Deactivated successfully.
Dec 02 10:03:32 localhost systemd[1]: Finished Load Kernel Module efi_pstore.

○ modprobe@fuse.service - Load Kernel Module fuse
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:modprobe(8)
   Main PID: 677 (code=exited, status=0/SUCCESS)
        CPU: 40ms

Dec 02 10:03:32 localhost systemd[1]: modprobe@fuse.service: Deactivated successfully.
Dec 02 10:03:32 localhost systemd[1]: Finished Load Kernel Module fuse.

○ netns-placeholder.service - Create netns directory
     Loaded: loaded (/etc/systemd/system/netns-placeholder.service; enabled; preset: enabled)
     Active: inactive (dead) since Tue 2025-12-02 11:07:12 UTC; 32min ago
   Main PID: 232702 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Dec 02 11:07:12 compute-0 systemd[1]: Starting Create netns directory...
Dec 02 11:07:12 compute-0 systemd[1]: netns-placeholder.service: Deactivated successfully.
Dec 02 11:07:12 compute-0 systemd[1]: Finished Create netns directory.

○ network.service - LSB: Bring up/down networking
     Loaded: loaded (/etc/rc.d/init.d/network; generated)
     Active: inactive (dead)
       Docs: man:systemd-sysv-generator(8)

● NetworkManager-wait-online.service - Network Manager Wait Online
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager-wait-online.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-12-02 10:44:11 UTC; 55min ago
       Docs: man:NetworkManager-wait-online.service(8)
   Main PID: 49007 (code=exited, status=0/SUCCESS)
        CPU: 23ms

Dec 02 10:44:11 compute-0 systemd[1]: Starting Network Manager Wait Online...
Dec 02 10:44:11 compute-0 systemd[1]: Finished Network Manager Wait Online.

● NetworkManager.service - Network Manager
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager.service; enabled; preset: enabled)
    Drop-In: /usr/lib/systemd/system/NetworkManager.service.d
             └─NetworkManager-ovs.conf
     Active: active (running) since Tue 2025-12-02 10:44:11 UTC; 55min ago
       Docs: man:NetworkManager(8)
   Main PID: 48987 (NetworkManager)
         IO: 104.0K read, 339.5K written
      Tasks: 3 (limit: 48628)
     Memory: 5.5M (peak: 6.7M)
        CPU: 29.283s
     CGroup: /system.slice/NetworkManager.service
             └─48987 /usr/sbin/NetworkManager --no-daemon

Dec 02 11:35:17 compute-0 NetworkManager[48987]: <info>  [1764675317.2137] manager: (tap28b69a92-50): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/145)
Dec 02 11:35:41 compute-0 NetworkManager[48987]: <info>  [1764675341.0041] device (tapf725ebd9-55): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')
Dec 02 11:35:58 compute-0 NetworkManager[48987]: <info>  [1764675358.1150] manager: (tapac73987a-aa): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/146)
Dec 02 11:35:59 compute-0 NetworkManager[48987]: <info>  [1764675359.2845] manager: (tapac73987a-aa): new Tun device (/org/freedesktop/NetworkManager/Devices/147)
Dec 02 11:35:59 compute-0 NetworkManager[48987]: <info>  [1764675359.3887] device (tapac73987a-aa): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Dec 02 11:35:59 compute-0 NetworkManager[48987]: <info>  [1764675359.3899] device (tapac73987a-aa): state change: unavailable -> disconnected (reason 'none', managed-type: 'external')
Dec 02 11:35:59 compute-0 NetworkManager[48987]: <info>  [1764675359.4749] manager: (tap28b69a92-50): new Veth device (/org/freedesktop/NetworkManager/Devices/148)
Dec 02 11:35:59 compute-0 NetworkManager[48987]: <info>  [1764675359.5428] device (tap28b69a92-50): carrier: link connected
Dec 02 11:35:59 compute-0 NetworkManager[48987]: <info>  [1764675359.7674] manager: (tap28b69a92-50): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/149)
Dec 02 11:36:38 compute-0 NetworkManager[48987]: <info>  [1764675398.8038] device (tapac73987a-aa): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')

○ nfs-idmapd.service - NFSv4 ID-name mapping service
Unit ntpd.service could not be found.
Unit ntpdate.service could not be found.
     Loaded: loaded (/usr/lib/systemd/system/nfs-idmapd.service; static)
     Active: inactive (dead)
       Docs: man:idmapd(8)

○ nfs-mountd.service - NFS Mount Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfs-mountd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.mountd(8)

○ nfs-server.service - NFS server and services
     Loaded: loaded (/usr/lib/systemd/system/nfs-server.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:rpc.nfsd(8)
             man:exportfs(8)

○ nfs-utils.service - NFS server and client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-utils.service; static)
     Active: inactive (dead)

○ nfsdcld.service - NFSv4 Client Tracking Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfsdcld.service; static)
     Active: inactive (dead)
       Docs: man:nfsdcld(8)

● nftables.service - Netfilter Tables
     Loaded: loaded (/usr/lib/systemd/system/nftables.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-12-02 10:46:22 UTC; 52min ago
       Docs: man:nft(8)
   Main PID: 63182 (code=exited, status=0/SUCCESS)
        CPU: 19ms

Dec 02 10:46:22 compute-0 systemd[1]: Starting Netfilter Tables...
Dec 02 10:46:22 compute-0 systemd[1]: Finished Netfilter Tables.

● nis-domainname.service - Read and set NIS domainname from /etc/sysconfig/network
     Loaded: loaded (/usr/lib/systemd/system/nis-domainname.service; enabled; preset: enabled)
     Active: active (exited) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
   Main PID: 678 (code=exited, status=0/SUCCESS)
        CPU: 6ms

Dec 02 10:03:32 localhost systemd[1]: Finished Read and set NIS domainname from /etc/sysconfig/network.

○ nvmefc-boot-connections.service - Auto-connect to subsystems on FC-NVME devices found during boot
     Loaded: loaded (/usr/lib/systemd/system/nvmefc-boot-connections.service; enabled; preset: enabled)
     Active: inactive (dead)

● openvswitch.service - Open vSwitch
     Loaded: loaded (/usr/lib/systemd/system/openvswitch.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-12-02 10:43:57 UTC; 55min ago
   Main PID: 47300 (code=exited, status=0/SUCCESS)
        CPU: 2ms

Dec 02 10:43:57 compute-0 systemd[1]: Starting Open vSwitch...
Dec 02 10:43:57 compute-0 systemd[1]: Finished Open vSwitch.

● ovs-delete-transient-ports.service - Open vSwitch Delete Transient Ports
     Loaded: loaded (/usr/lib/systemd/system/ovs-delete-transient-ports.service; static)
     Active: active (exited) since Tue 2025-12-02 10:43:57 UTC; 55min ago
   Main PID: 47238 (code=exited, status=0/SUCCESS)
        CPU: 32ms

Dec 02 10:43:57 compute-0 systemd[1]: Starting Open vSwitch Delete Transient Ports...
Dec 02 10:43:57 compute-0 systemd[1]: Finished Open vSwitch Delete Transient Ports.

● ovs-vswitchd.service - Open vSwitch Forwarding Unit
     Loaded: loaded (/usr/lib/systemd/system/ovs-vswitchd.service; static)
     Active: active (running) since Tue 2025-12-02 10:43:57 UTC; 55min ago
   Main PID: 47291 (ovs-vswitchd)
         IO: 3.4M read, 464.0K written
      Tasks: 13 (limit: 48628)
     Memory: 246.2M (peak: 248.5M)
        CPU: 22.254s
     CGroup: /system.slice/ovs-vswitchd.service
             └─47291 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach

Dec 02 10:43:57 compute-0 systemd[1]: Starting Open vSwitch Forwarding Unit...
Dec 02 10:43:57 compute-0 ovs-ctl[47281]: Inserting openvswitch module [  OK  ]
Dec 02 10:43:57 compute-0 ovs-ctl[47250]: Starting ovs-vswitchd [  OK  ]
Dec 02 10:43:57 compute-0 ovs-vsctl[47298]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0
Dec 02 10:43:57 compute-0 ovs-ctl[47250]: Enabling remote OVSDB managers [  OK  ]
Dec 02 10:43:57 compute-0 systemd[1]: Started Open vSwitch Forwarding Unit.

Unit pacemaker.service could not be found.
Unit plymouth-quit-wait.service could not be found.
Unit plymouth-start.service could not be found.
● ovsdb-server.service - Open vSwitch Database Unit
     Loaded: loaded (/usr/lib/systemd/system/ovsdb-server.service; static)
     Active: active (running) since Tue 2025-12-02 10:43:57 UTC; 55min ago
   Main PID: 47210 (ovsdb-server)
         IO: 1.2M read, 768.0K written
      Tasks: 1 (limit: 48628)
     Memory: 5.0M (peak: 38.9M)
        CPU: 16.811s
     CGroup: /system.slice/ovsdb-server.service
             └─47210 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach

Dec 02 10:43:57 compute-0 chown[47157]: /usr/bin/chown: cannot access '/run/openvswitch': No such file or directory
Dec 02 10:43:57 compute-0 ovs-ctl[47162]: /etc/openvswitch/conf.db does not exist ... (warning).
Dec 02 10:43:57 compute-0 ovs-ctl[47162]: Creating empty database /etc/openvswitch/conf.db [  OK  ]
Dec 02 10:43:57 compute-0 ovs-ctl[47162]: Starting ovsdb-server [  OK  ]
Dec 02 10:43:57 compute-0 ovs-vsctl[47211]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait -- init -- set Open_vSwitch . db-version=8.5.1
Dec 02 10:43:57 compute-0 ovs-vsctl[47231]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait set Open_vSwitch . ovs-version=3.3.5-115.el9s "external-ids:system-id=\"4ecd1ad4-3ade-413e-b6d7-47ab2fad39ae\"" "external-ids:rundir=\"/var/run/openvswitch\"" "system-type=\"centos\"" "system-version=\"9\""
Dec 02 10:43:57 compute-0 ovs-ctl[47162]: Configuring Open vSwitch system IDs [  OK  ]
Dec 02 10:43:57 compute-0 ovs-ctl[47162]: Enabling remote OVSDB managers [  OK  ]
Dec 02 10:43:57 compute-0 ovs-vsctl[47237]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0
Dec 02 10:43:57 compute-0 systemd[1]: Started Open vSwitch Database Unit.

● polkit.service - Authorization Manager
     Loaded: loaded (/usr/lib/systemd/system/polkit.service; static)
     Active: active (running) since Tue 2025-12-02 10:43:14 UTC; 55min ago
       Docs: man:polkit(8)
   Main PID: 43476 (polkitd)
         IO: 11.3M read, 0B written
      Tasks: 12 (limit: 48628)
     Memory: 17.1M (peak: 18.2M)
        CPU: 2.356s
     CGroup: /system.slice/polkit.service
             └─43476 /usr/lib/polkit-1/polkitd --no-debug

Dec 02 11:03:47 compute-0 polkitd[43476]: Collecting garbage unconditionally...
Dec 02 11:03:47 compute-0 polkitd[43476]: Loading rules from directory /etc/polkit-1/rules.d
Dec 02 11:03:47 compute-0 polkitd[43476]: Loading rules from directory /usr/share/polkit-1/rules.d
Dec 02 11:03:47 compute-0 polkitd[43476]: Finished loading, compiling and executing 3 rules
Dec 02 11:05:32 compute-0 polkitd[43476]: Registered Authentication Agent for unix-process:217492:372587 (system bus name :1.2847 [pkttyagent --process 217492 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Dec 02 11:05:32 compute-0 polkitd[43476]: Unregistered Authentication Agent for unix-process:217492:372587 (system bus name :1.2847, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Dec 02 11:05:32 compute-0 polkitd[43476]: Registered Authentication Agent for unix-process:217491:372586 (system bus name :1.2848 [pkttyagent --process 217491 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Dec 02 11:05:33 compute-0 polkitd[43476]: Unregistered Authentication Agent for unix-process:217491:372586 (system bus name :1.2848, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Unit power-profiles-daemon.service could not be found.
Unit rpc-svcgssd.service could not be found.
Dec 02 11:05:35 compute-0 polkitd[43476]: Registered Authentication Agent for unix-process:217958:372828 (system bus name :1.2851 [pkttyagent --process 217958 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Dec 02 11:05:35 compute-0 polkitd[43476]: Unregistered Authentication Agent for unix-process:217958:372828 (system bus name :1.2851, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)

○ raid-check.service - RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.service; static)
     Active: inactive (dead)
TriggeredBy: ○ raid-check.timer

○ rc-local.service - /etc/rc.d/rc.local Compatibility
     Loaded: loaded (/usr/lib/systemd/system/rc-local.service; static)
     Active: inactive (dead)
       Docs: man:systemd-rc-local-generator(8)

○ rescue.service - Rescue Shell
     Loaded: loaded (/usr/lib/systemd/system/rescue.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

○ rpc-gssd.service - RPC security service for NFS client and server
     Loaded: loaded (/usr/lib/systemd/system/rpc-gssd.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-02 10:03:34 UTC; 1h 35min ago
       Docs: man:rpc.gssd(8)

Dec 02 10:03:34 np0005542249.novalocal systemd[1]: RPC security service for NFS client and server was skipped because of an unmet condition check (ConditionPathExists=/etc/krb5.keytab).

● rpc-statd-notify.service - Notify NFS peers of a restart
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd-notify.service; static)
     Active: active (exited) since Tue 2025-12-02 10:03:36 UTC; 1h 35min ago
       Docs: man:sm-notify(8)
             man:rpc.statd(8)
        CPU: 7ms

Dec 02 10:03:36 np0005542249.novalocal systemd[1]: Starting Notify NFS peers of a restart...
Dec 02 10:03:36 np0005542249.novalocal sm-notify[1004]: Version 2.5.4 starting
Dec 02 10:03:36 np0005542249.novalocal systemd[1]: Started Notify NFS peers of a restart.

○ rpc-statd.service - NFS status monitor for NFSv2/3 locking.
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.statd(8)

● rpcbind.service - RPC Bind
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
TriggeredBy: ● rpcbind.socket
       Docs: man:rpcbind(8)
   Main PID: 700 (rpcbind)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48628)
     Memory: 2.3M (peak: 2.8M)
        CPU: 38ms
     CGroup: /system.slice/rpcbind.service
             └─700 /usr/bin/rpcbind -w -f

Dec 02 10:03:32 localhost systemd[1]: Starting RPC Bind...
Dec 02 10:03:32 localhost systemd[1]: Started RPC Bind.

● rsyslog.service - System Logging Service
     Loaded: loaded (/usr/lib/systemd/system/rsyslog.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-02 10:03:36 UTC; 1h 35min ago
       Docs: man:rsyslogd(8)
             https://www.rsyslog.com/doc/
   Main PID: 1005 (rsyslogd)
         IO: 0B read, 22.7M written
      Tasks: 3 (limit: 48628)
     Memory: 21.8M (peak: 22.3M)
        CPU: 15.995s
     CGroup: /system.slice/rsyslog.service
             └─1005 /usr/sbin/rsyslogd -n

Dec 02 11:16:09 compute-0 rsyslogd[1005]: imjournal from <np0005542249:systemd>: begin to drop messages due to rate-limiting
Dec 02 11:16:42 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Dec 02 11:17:26 compute-0 rsyslogd[1005]: imjournal: 2130 messages lost due to rate-limiting (20000 allowed within 600 seconds)
Dec 02 11:25:09 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Dec 02 11:25:09 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Dec 02 11:33:54 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Unit sntp.service could not be found.
Unit sshd-keygen.service could not be found.
Dec 02 11:33:54 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Dec 02 11:38:43 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Dec 02 11:38:48 compute-0 rsyslogd[1005]: imjournal from <np0005542249:ceph-osd>: begin to drop messages due to rate-limiting
Dec 02 11:38:48 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]

○ selinux-autorelabel-mark.service - Mark the need to relabel after reboot
     Loaded: loaded (/usr/lib/systemd/system/selinux-autorelabel-mark.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-02 10:03:32 UTC; 1h 35min ago

Dec 02 10:03:32 localhost systemd[1]: Mark the need to relabel after reboot was skipped because of an unmet condition check (ConditionSecurity=!selinux).

● serial-getty@ttyS0.service - Serial Getty on ttyS0
     Loaded: loaded (/usr/lib/systemd/system/serial-getty@.service; enabled-runtime; preset: disabled)
     Active: active (running) since Tue 2025-12-02 10:03:36 UTC; 1h 35min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1011 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48628)
     Memory: 292.0K (peak: 536.0K)
        CPU: 8ms
     CGroup: /system.slice/system-serial\x2dgetty.slice/serial-getty@ttyS0.service
             └─1011 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

Dec 02 10:03:36 np0005542249.novalocal systemd[1]: Started Serial Getty on ttyS0.

○ sshd-keygen@ecdsa.service - OpenSSH ecdsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-02 11:03:52 UTC; 35min ago

Dec 02 10:03:32 localhost systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Dec 02 11:03:52 compute-0 systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@ed25519.service - OpenSSH ed25519 Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-02 11:03:52 UTC; 35min ago

Dec 02 10:03:32 localhost systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Dec 02 11:03:52 compute-0 systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@rsa.service - OpenSSH rsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-02 11:03:52 UTC; 35min ago

Dec 02 10:03:32 localhost systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Unit syslog.service could not be found.
Dec 02 11:03:52 compute-0 systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

● sshd.service - OpenSSH server daemon
     Loaded: loaded (/usr/lib/systemd/system/sshd.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-02 11:03:52 UTC; 35min ago
       Docs: man:sshd(8)
             man:sshd_config(5)
   Main PID: 190649 (sshd)
         IO: 700.0K read, 48.0K written
      Tasks: 1 (limit: 48628)
     Memory: 4.1M (peak: 7.5M)
        CPU: 2.752s
     CGroup: /system.slice/sshd.service
             └─190649 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"

Dec 02 11:38:22 compute-0 sshd-session[301119]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Dec 02 11:38:34 compute-0 sshd-session[301449]: Invalid user admin from 107.155.55.174 port 60036
Dec 02 11:38:34 compute-0 sshd-session[301449]: Received disconnect from 107.155.55.174 port 60036:11: Bye Bye [preauth]
Dec 02 11:38:34 compute-0 sshd-session[301449]: Disconnected from invalid user admin 107.155.55.174 port 60036 [preauth]
Dec 02 11:39:04 compute-0 sshd-session[305925]: Invalid user local from 155.4.244.169 port 37390
Dec 02 11:39:04 compute-0 sshd-session[305925]: Received disconnect from 155.4.244.169 port 37390:11: Bye Bye [preauth]
Dec 02 11:39:04 compute-0 sshd-session[305925]: Disconnected from invalid user local 155.4.244.169 port 37390 [preauth]
Dec 02 11:39:08 compute-0 sshd-session[307174]: Invalid user test from 177.157.199.115 port 51670
Dec 02 11:39:08 compute-0 sshd-session[307174]: Received disconnect from 177.157.199.115 port 51670:11: Bye Bye [preauth]
Dec 02 11:39:08 compute-0 sshd-session[307174]: Disconnected from invalid user test 177.157.199.115 port 51670 [preauth]

○ sssd-kcm.service - SSSD Kerberos Cache Manager
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.service; indirect; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ● sssd-kcm.socket
       Docs: man:sssd-kcm(5)

○ sssd.service - System Security Services Daemon
     Loaded: loaded (/usr/lib/systemd/system/sssd.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-02 10:03:32 UTC; 1h 35min ago

Dec 02 10:03:32 localhost systemd[1]: System Security Services Daemon was skipped because no trigger condition checks were met.

○ sysstat-collect.service - system activity accounting tool
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-collect.timer
       Docs: man:sa1(8)

○ sysstat-summary.service - Generate a daily summary of process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-summary.timer
       Docs: man:sa2(8)

○ sysstat.service - Resets System Activity Logs
     Loaded: loaded (/usr/lib/systemd/system/sysstat.service; enabled; preset: enabled)
     Active: inactive (dead)

○ systemd-ask-password-console.service - Dispatch Password Requests to Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-console.path
       Docs: man:systemd-ask-password-console.service(8)

○ systemd-ask-password-wall.service - Forward Password Requests to Wall
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-wall.path
       Docs: man:systemd-ask-password-wall.service(8)

○ systemd-binfmt.service - Set Up Additional Binary Formats
     Loaded: loaded (/usr/lib/systemd/system/systemd-binfmt.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:systemd-binfmt.service(8)
             man:binfmt.d(5)
             https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Dec 02 10:03:32 localhost systemd[1]: Set Up Additional Binary Formats was skipped because no trigger condition checks were met.

○ systemd-boot-random-seed.service - Update Boot Loader Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-random-seed.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:systemd-boot-random-seed.service(8)
             man:random(4)

Dec 02 10:03:32 localhost systemd[1]: Update Boot Loader Random Seed was skipped because no trigger condition checks were met.

● systemd-boot-update.service - Automatic Boot Loader Update
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-update.service; enabled; preset: enabled)
     Active: active (exited) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:bootctl(1)
   Main PID: 696 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Dec 02 10:03:32 localhost systemd[1]: Starting Automatic Boot Loader Update...
Dec 02 10:03:32 localhost bootctl[696]: Couldn't find EFI system partition, skipping.
Dec 02 10:03:32 localhost systemd[1]: Finished Automatic Boot Loader Update.

○ systemd-firstboot.service - First Boot Wizard
     Loaded: loaded (/usr/lib/systemd/system/systemd-firstboot.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:systemd-firstboot(1)

Dec 02 10:03:32 localhost systemd[1]: First Boot Wizard was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ systemd-fsck-root.service - File System Check on Root Device
     Loaded: loaded (/usr/lib/systemd/system/systemd-fsck-root.service; static)
     Active: inactive (dead) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
   Duration: 1.476s
       Docs: man:systemd-fsck-root.service(8)
   Main PID: 552 (code=exited, status=0/SUCCESS)
        CPU: 16ms

Dec 02 10:03:30 localhost systemd[1]: Starting File System Check on /dev/disk/by-uuid/b277050f-8ace-464d-abb6-4c46d4c45253...
Dec 02 10:03:30 localhost systemd-fsck[554]: /usr/sbin/fsck.xfs: XFS file system.
Dec 02 10:03:30 localhost systemd[1]: Finished File System Check on /dev/disk/by-uuid/b277050f-8ace-464d-abb6-4c46d4c45253.

● systemd-hostnamed.service - Hostname Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-hostnamed.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-hostnamed.service.d
             └─disable-privatedevices.conf
     Active: active (running) since Tue 2025-12-02 11:38:55 UTC; 18s ago
       Docs: man:systemd-hostnamed.service(8)
             man:hostname(5)
             man:machine-info(5)
             man:org.freedesktop.resolve1(5)
   Main PID: 304498 (systemd-hostnam)
         IO: 20.0K read, 0B written
      Tasks: 1 (limit: 48628)
     Memory: 2.7M (peak: 3.8M)
        CPU: 131ms
     CGroup: /system.slice/systemd-hostnamed.service
             └─304498 /usr/lib/systemd/systemd-hostnamed

Dec 02 11:38:55 compute-0 systemd[1]: Starting Hostname Service...
Dec 02 11:38:55 compute-0 systemd[1]: Started Hostname Service.

● systemd-hwdb-update.service - Rebuild Hardware Database
     Loaded: loaded (/usr/lib/systemd/system/systemd-hwdb-update.service; static)
     Active: active (exited) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:hwdb(7)
             man:systemd-hwdb(8)
   Main PID: 688 (code=exited, status=0/SUCCESS)
        CPU: 566ms

Dec 02 10:03:32 localhost systemd[1]: Starting Rebuild Hardware Database...
Dec 02 10:03:32 localhost systemd[1]: Finished Rebuild Hardware Database.

○ systemd-initctl.service - initctl Compatibility Daemon
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-initctl.socket
       Docs: man:systemd-initctl.service(8)

● systemd-journal-catalog-update.service - Rebuild Journal Catalog
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-catalog-update.service; static)
     Active: active (exited) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 701 (code=exited, status=0/SUCCESS)
        CPU: 18ms

Dec 02 10:03:32 localhost systemd[1]: Starting Rebuild Journal Catalog...
Dec 02 10:03:32 localhost systemd[1]: Finished Rebuild Journal Catalog.

● systemd-journal-flush.service - Flush Journal to Persistent Storage
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-flush.service; static)
     Active: active (exited) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 689 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Dec 02 10:03:32 localhost systemd[1]: Starting Flush Journal to Persistent Storage...
Dec 02 10:03:32 localhost systemd[1]: Finished Flush Journal to Persistent Storage.

● systemd-journald.service - Journal Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.service; static)
     Active: active (running) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
TriggeredBy: ● systemd-journald-dev-log.socket
             ● systemd-journald.socket
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 679 (systemd-journal)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48628)
     Memory: 85.9M (peak: 93.7M)
        CPU: 17.457s
     CGroup: /system.slice/systemd-journald.service
             └─679 /usr/lib/systemd/systemd-journald

Dec 02 10:03:31 localhost systemd-journald[679]: Journal started
Dec 02 10:03:31 localhost systemd-journald[679]: Runtime Journal (/run/log/journal/1f988c78c563e12389ab342aced42dbb) is 8.0M, max 153.6M, 145.6M free.
Dec 02 10:03:31 localhost systemd[1]: systemd-journald.service: Deactivated successfully.
Dec 02 10:03:32 localhost systemd-journald[679]: Runtime Journal (/run/log/journal/1f988c78c563e12389ab342aced42dbb) is 8.0M, max 153.6M, 145.6M free.
Dec 02 10:03:32 localhost systemd-journald[679]: Received client request to flush runtime journal.

● systemd-logind.service - User Login Management
     Loaded: loaded (/usr/lib/systemd/system/systemd-logind.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-logind.service.d
             └─10-grub2-logind-service.conf
     Active: active (running) since Tue 2025-12-02 10:03:33 UTC; 1h 35min ago
       Docs: man:sd-login(3)
             man:systemd-logind.service(8)
             man:logind.conf(5)
             man:org.freedesktop.login1(5)
   Main PID: 787 (systemd-logind)
     Status: "Processing requests..."
         IO: 4.0K read, 0B written
      Tasks: 1 (limit: 48628)
     Memory: 6.5M (peak: 7.7M)
        CPU: 4.484s
     CGroup: /system.slice/systemd-logind.service
             └─787 /usr/lib/systemd/systemd-logind

Dec 02 11:06:04 compute-0 systemd-logind[787]: Removed session 49.
Dec 02 11:06:10 compute-0 systemd-logind[787]: New session 50 of user zuul.
Dec 02 11:07:49 compute-0 systemd-logind[787]: Watching system buttons on /dev/input/event0 (Power Button)
Dec 02 11:07:49 compute-0 systemd-logind[787]: Watching system buttons on /dev/input/event1 (AT Translated Set 2 keyboard)
Dec 02 11:08:42 compute-0 systemd-logind[787]: New session 51 of user zuul.
Dec 02 11:08:43 compute-0 systemd-logind[787]: Session 51 logged out. Waiting for processes to exit.
Dec 02 11:08:43 compute-0 systemd-logind[787]: Removed session 51.
Dec 02 11:09:27 compute-0 systemd-logind[787]: Session 50 logged out. Waiting for processes to exit.
Dec 02 11:09:27 compute-0 systemd-logind[787]: Removed session 50.
Dec 02 11:38:22 compute-0 systemd-logind[787]: New session 52 of user zuul.

○ systemd-machine-id-commit.service - Commit a transient machine-id on disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-machine-id-commit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:systemd-machine-id-commit.service(8)

Unit systemd-networkd-wait-online.service could not be found.
Dec 02 10:03:32 localhost systemd[1]: Commit a transient machine-id on disk was skipped because of an unmet condition check (ConditionPathIsMountPoint=/etc/machine-id).

● systemd-machined.service - Virtual Machine and Container Registration Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-machined.service; static)
     Active: active (running) since Tue 2025-12-02 11:05:26 UTC; 33min ago
       Docs: man:systemd-machined.service(8)
             man:org.freedesktop.machine1(5)
   Main PID: 216222 (systemd-machine)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48628)
     Memory: 1.5M (peak: 2.0M)
        CPU: 2.109s
     CGroup: /system.slice/systemd-machined.service
             └─216222 /usr/lib/systemd/systemd-machined

Dec 02 11:32:03 compute-0 systemd-machined[216222]: Machine qemu-25-instance-00000019 terminated.
Dec 02 11:32:08 compute-0 systemd-machined[216222]: New machine qemu-27-instance-0000001b.
Dec 02 11:32:10 compute-0 systemd-machined[216222]: Machine qemu-24-instance-00000018 terminated.
Dec 02 11:32:46 compute-0 systemd-machined[216222]: Machine qemu-27-instance-0000001b terminated.
Dec 02 11:33:55 compute-0 systemd-machined[216222]: New machine qemu-28-instance-0000001c.
Dec 02 11:34:25 compute-0 systemd-machined[216222]: Machine qemu-28-instance-0000001c terminated.
Dec 02 11:35:16 compute-0 systemd-machined[216222]: New machine qemu-29-instance-0000001d.
Dec 02 11:35:41 compute-0 systemd-machined[216222]: Machine qemu-29-instance-0000001d terminated.
Dec 02 11:35:59 compute-0 systemd-machined[216222]: New machine qemu-30-instance-0000001e.
Dec 02 11:36:38 compute-0 systemd-machined[216222]: Machine qemu-30-instance-0000001e terminated.

● systemd-modules-load.service - Load Kernel Modules
     Loaded: loaded (/usr/lib/systemd/system/systemd-modules-load.service; static)
     Active: active (exited) since Tue 2025-12-02 11:07:42 UTC; 31min ago
       Docs: man:systemd-modules-load.service(8)
             man:modules-load.d(5)
   Main PID: 237209 (code=exited, status=0/SUCCESS)
        CPU: 18ms

Dec 02 11:07:42 compute-0 systemd[1]: Starting Load Kernel Modules...
Dec 02 11:07:42 compute-0 systemd[1]: Finished Load Kernel Modules.

● systemd-network-generator.service - Generate network units from Kernel command line
     Loaded: loaded (/usr/lib/systemd/system/systemd-network-generator.service; enabled; preset: enabled)
     Active: active (exited) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:systemd-network-generator.service(8)
   Main PID: 680 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Dec 02 10:03:32 localhost systemd[1]: Finished Generate network units from Kernel command line.

○ systemd-pcrmachine.service - TPM2 PCR Machine ID Measurement
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrmachine.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
       Docs: man:systemd-pcrmachine.service(8)

○ systemd-pcrphase-initrd.service - TPM2 PCR Barrier (initrd)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-initrd.service; static)
     Active: inactive (dead)
       Docs: man:systemd-pcrphase-initrd.service(8)

○ systemd-pcrphase-sysinit.service - TPM2 PCR Barrier (Initialization)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-sysinit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:systemd-pcrphase-sysinit.service(8)

Dec 02 10:03:32 localhost systemd[1]: TPM2 PCR Barrier (Initialization) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pcrphase.service - TPM2 PCR Barrier (User)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-02 10:03:34 UTC; 1h 35min ago
       Docs: man:systemd-pcrphase.service(8)

Unit systemd-timesyncd.service could not be found.
Dec 02 10:03:34 np0005542249.novalocal systemd[1]: TPM2 PCR Barrier (User) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pstore.service - Platform Persistent Storage Archival
     Loaded: loaded (/usr/lib/systemd/system/systemd-pstore.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:systemd-pstore(8)

Dec 02 10:03:32 localhost systemd[1]: Platform Persistent Storage Archival was skipped because of an unmet condition check (ConditionDirectoryNotEmpty=/sys/fs/pstore).

● systemd-random-seed.service - Load/Save OS Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-random-seed.service; static)
     Active: active (exited) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:systemd-random-seed.service(8)
             man:random(4)
   Main PID: 690 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Dec 02 10:03:32 localhost systemd[1]: Starting Load/Save OS Random Seed...
Dec 02 10:03:32 localhost systemd[1]: Finished Load/Save OS Random Seed.

● systemd-remount-fs.service - Remount Root and Kernel File Systems
     Loaded: loaded (/usr/lib/systemd/system/systemd-remount-fs.service; enabled-runtime; preset: disabled)
     Active: active (exited) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:systemd-remount-fs.service(8)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
   Main PID: 681 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Dec 02 10:03:32 localhost systemd[1]: Finished Remount Root and Kernel File Systems.

○ systemd-repart.service - Repartition Root Disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-repart.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
       Docs: man:systemd-repart.service(8)

○ systemd-rfkill.service - Load/Save RF Kill Switch Status
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-rfkill.socket
       Docs: man:systemd-rfkill.service(8)

● systemd-sysctl.service - Apply Kernel Variables
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysctl.service; static)
     Active: active (exited) since Tue 2025-12-02 10:43:25 UTC; 55min ago
       Docs: man:systemd-sysctl.service(8)
             man:sysctl.d(5)
   Main PID: 44963 (code=exited, status=0/SUCCESS)
        CPU: 24ms

Dec 02 10:43:25 compute-0 systemd[1]: Starting Apply Kernel Variables...
Dec 02 10:43:25 compute-0 systemd[1]: Finished Apply Kernel Variables.

○ systemd-sysext.service - Merge System Extension Images into /usr/ and /opt/
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysext.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd-sysext.service(8)

● systemd-sysusers.service - Create System Users
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysusers.service; static)
     Active: active (exited) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:sysusers.d(5)
             man:systemd-sysusers.service(8)
   Main PID: 691 (code=exited, status=0/SUCCESS)
        CPU: 20ms

Dec 02 10:03:32 localhost systemd[1]: Starting Create System Users...
Dec 02 10:03:32 localhost systemd[1]: Finished Create System Users.

○ systemd-tmpfiles-clean.service - Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.service; static)
     Active: inactive (dead) since Tue 2025-12-02 10:18:41 UTC; 1h 20min ago
TriggeredBy: ● systemd-tmpfiles-clean.timer
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 29965 (code=exited, status=0/SUCCESS)
        CPU: 63ms

Dec 02 10:18:41 compute-0 systemd[1]: Starting Cleanup of Temporary Directories...
Dec 02 10:18:41 compute-0 systemd[1]: systemd-tmpfiles-clean.service: Deactivated successfully.
Unit systemd-tmpfiles.service could not be found.
Dec 02 10:18:41 compute-0 systemd[1]: Finished Cleanup of Temporary Directories.

● systemd-tmpfiles-setup-dev.service - Create Static Device Nodes in /dev
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup-dev.service; static)
     Active: active (exited) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 693 (code=exited, status=0/SUCCESS)
        CPU: 46ms

Dec 02 10:03:32 localhost systemd[1]: Starting Create Static Device Nodes in /dev...
Dec 02 10:03:32 localhost systemd[1]: Finished Create Static Device Nodes in /dev.

● systemd-tmpfiles-setup.service - Create Volatile Files and Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup.service; static)
     Active: active (exited) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 697 (code=exited, status=0/SUCCESS)
        CPU: 97ms

Dec 02 10:03:32 localhost systemd[1]: Starting Create Volatile Files and Directories...
Dec 02 10:03:32 localhost systemd[1]: Finished Create Volatile Files and Directories.

○ systemd-udev-settle.service - Wait for udev To Complete Device Initialization
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-settle.service; static)
     Active: inactive (dead)
       Docs: man:systemd-udev-settle.service(8)

● systemd-udev-trigger.service - Coldplug All udev Devices
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-trigger.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-udev-trigger.service.d
             └─systemd-udev-trigger-no-reload.conf
     Active: active (exited) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:udev(7)
             man:systemd-udevd.service(8)
   Main PID: 684 (code=exited, status=0/SUCCESS)
        CPU: 100ms

Dec 02 10:03:32 localhost systemd[1]: Finished Coldplug All udev Devices.

● systemd-udevd.service - Rule-based Manager for Device Events and Files
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd.service; static)
     Active: active (running) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
TriggeredBy: ● systemd-udevd-control.socket
             ● systemd-udevd-kernel.socket
       Docs: man:systemd-udevd.service(8)
             man:udev(7)
   Main PID: 730 (systemd-udevd)
     Status: "Processing with 32 children at max"
         IO: 135.4M read, 60.0M written
      Tasks: 1
     Memory: 57.0M (peak: 114.4M)
        CPU: 14.304s
     CGroup: /system.slice/systemd-udevd.service
             └─udev
               └─730 /usr/lib/systemd/systemd-udevd

Dec 02 11:33:55 compute-0 systemd-udevd[295673]: Network interface NamePolicy= disabled on kernel command line.
Dec 02 11:33:55 compute-0 systemd-udevd[295678]: Network interface NamePolicy= disabled on kernel command line.
Dec 02 11:35:16 compute-0 systemd-udevd[297262]: Network interface NamePolicy= disabled on kernel command line.
Dec 02 11:35:59 compute-0 systemd-udevd[298576]: Network interface NamePolicy= disabled on kernel command line.
Dec 02 11:38:36 compute-0 lvm[301840]: PV /dev/loop4 online, VG ceph_vg1 is complete.
Dec 02 11:38:36 compute-0 lvm[301840]: VG ceph_vg1 finished
Dec 02 11:38:36 compute-0 lvm[301843]: PV /dev/loop5 online, VG ceph_vg2 is complete.
Dec 02 11:38:36 compute-0 lvm[301843]: VG ceph_vg2 finished
Dec 02 11:38:36 compute-0 lvm[301855]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Dec 02 11:38:36 compute-0 lvm[301855]: VG ceph_vg0 finished

● systemd-update-done.service - Update is Completed
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-done.service; static)
     Active: active (exited) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:systemd-update-done.service(8)
   Main PID: 731 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Dec 02 10:03:32 localhost systemd[1]: Starting Update is Completed...
Dec 02 10:03:32 localhost systemd[1]: Finished Update is Completed.

○ systemd-update-utmp-runlevel.service - Record Runlevel Change in UTMP
Unit tlp.service could not be found.
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp-runlevel.service; static)
     Active: inactive (dead) since Tue 2025-12-02 10:03:36 UTC; 1h 35min ago
       Docs: man:systemd-update-utmp-runlevel.service(8)
             man:utmp(5)
   Main PID: 1019 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Dec 02 10:03:36 np0005542249.novalocal systemd[1]: Starting Record Runlevel Change in UTMP...
Dec 02 10:03:36 np0005542249.novalocal systemd[1]: systemd-update-utmp-runlevel.service: Deactivated successfully.
Dec 02 10:03:36 np0005542249.novalocal systemd[1]: Finished Record Runlevel Change in UTMP.

● systemd-update-utmp.service - Record System Boot/Shutdown in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp.service; static)
     Active: active (exited) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:systemd-update-utmp.service(8)
             man:utmp(5)
   Main PID: 729 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Dec 02 10:03:32 localhost systemd[1]: Starting Record System Boot/Shutdown in UTMP...
Dec 02 10:03:32 localhost systemd[1]: Finished Record System Boot/Shutdown in UTMP.

● systemd-user-sessions.service - Permit User Sessions
     Loaded: loaded (/usr/lib/systemd/system/systemd-user-sessions.service; static)
     Active: active (exited) since Tue 2025-12-02 10:03:36 UTC; 1h 35min ago
       Docs: man:systemd-user-sessions.service(8)
   Main PID: 1007 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Dec 02 10:03:36 np0005542249.novalocal systemd[1]: Starting Permit User Sessions...
Dec 02 10:03:36 np0005542249.novalocal systemd[1]: Finished Permit User Sessions.

○ systemd-vconsole-setup.service - Setup Virtual Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-vconsole-setup.service; static)
     Active: inactive (dead) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
   Duration: 1.946s
       Docs: man:systemd-vconsole-setup.service(8)
             man:vconsole.conf(5)
   Main PID: 312 (code=exited, status=0/SUCCESS)
        CPU: 208ms

Dec 02 10:03:29 localhost systemd[1]: Finished Setup Virtual Console.
Dec 02 10:03:31 localhost systemd[1]: systemd-vconsole-setup.service: Deactivated successfully.
Dec 02 10:03:31 localhost systemd[1]: Stopped Setup Virtual Console.
Notice: journal has been rotated since unit was started, output may be incomplete.

● tuned.service - Dynamic System Tuning Daemon
     Loaded: loaded (/usr/lib/systemd/system/tuned.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-02 10:54:59 UTC; 44min ago
       Docs: man:tuned(8)
             man:tuned.conf(5)
             man:tuned-adm(8)
   Main PID: 113067 (tuned)
         IO: 0B read, 0B written
      Tasks: 4 (limit: 48628)
     Memory: 13.7M (peak: 16.0M)
        CPU: 1.464s
     CGroup: /system.slice/tuned.service
             └─113067 /usr/bin/python3 -Es /usr/sbin/tuned -l -P

Dec 02 10:54:59 compute-0 systemd[1]: Starting Dynamic System Tuning Daemon...
Dec 02 10:54:59 compute-0 systemd[1]: Started Dynamic System Tuning Daemon.

○ unbound-anchor.service - update of the root trust anchor for DNSSEC validation in unbound
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.service; static)
     Active: inactive (dead)
TriggeredBy: ● unbound-anchor.timer
       Docs: man:unbound-anchor(8)

● user-runtime-dir@1000.service - User Runtime Directory /run/user/1000
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Tue 2025-12-02 10:04:12 UTC; 1h 35min ago
       Docs: man:user@.service(5)
   Main PID: 4309 (code=exited, status=0/SUCCESS)
        CPU: 23ms

Dec 02 10:04:12 np0005542249.novalocal systemd[1]: Starting User Runtime Directory /run/user/1000...
Dec 02 10:04:12 np0005542249.novalocal systemd[1]: Finished User Runtime Directory /run/user/1000.

● user-runtime-dir@42477.service - User Runtime Directory /run/user/42477
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Tue 2025-12-02 10:49:32 UTC; 49min ago
       Docs: man:user@.service(5)
   Main PID: 76633 (code=exited, status=0/SUCCESS)
        CPU: 15ms

Dec 02 10:49:32 compute-0 systemd[1]: Starting User Runtime Directory /run/user/42477...
Dec 02 10:49:32 compute-0 systemd[1]: Finished User Runtime Directory /run/user/42477.

● user@1000.service - User Manager for UID 1000
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Tue 2025-12-02 10:04:12 UTC; 1h 35min ago
       Docs: man:user@.service(5)
   Main PID: 4310 (systemd)
     Status: "Ready."
         IO: 676.0K read, 8.0K written
      Tasks: 5
     Memory: 9.2M (peak: 13.4M)
        CPU: 5.160s
     CGroup: /user.slice/user-1000.slice/user@1000.service
             ├─app.slice
             │ └─dbus-broker.service
             │   ├─14561 /usr/bin/dbus-broker-launch --scope user
             │   └─14577 dbus-broker --log 4 --controller 9 --machine-id 1f988c78c563e12389ab342aced42dbb --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             ├─init.scope
             │ ├─4310 /usr/lib/systemd/systemd --user
             │ └─4312 "(sd-pam)"
             └─user.slice
               └─podman-pause-2aa5b999.scope
                 └─14491 catatonit -P

Dec 02 10:15:31 np0005542249.novalocal dbus-broker-launch[14561]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +31: Eavesdropping is deprecated and ignored
Dec 02 10:15:31 np0005542249.novalocal dbus-broker-launch[14561]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +33: Eavesdropping is deprecated and ignored
Dec 02 10:15:31 np0005542249.novalocal systemd[4310]: Started D-Bus User Message Bus.
Dec 02 10:15:31 np0005542249.novalocal dbus-broker-lau[14561]: Ready
Dec 02 10:15:31 np0005542249.novalocal systemd[4310]: selinux: avc:  op=load_policy lsm=selinux seqno=6 res=1
Dec 02 10:15:31 np0005542249.novalocal systemd[4310]: Created slice Slice /user.
Dec 02 10:15:31 np0005542249.novalocal systemd[4310]: podman-14480.scope: unit configures an IP firewall, but not running as root.
Dec 02 10:15:31 np0005542249.novalocal systemd[4310]: (This warning is only shown for the first unit using IP firewalling.)
Dec 02 10:15:31 np0005542249.novalocal systemd[4310]: Started podman-14480.scope.
Dec 02 10:15:31 np0005542249.novalocal systemd[4310]: Started podman-pause-2aa5b999.scope.

● user@42477.service - User Manager for UID 42477
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Tue 2025-12-02 10:49:32 UTC; 49min ago
       Docs: man:user@.service(5)
   Main PID: 76634 (systemd)
     Status: "Ready."
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 7.2M (peak: 10.6M)
        CPU: 3.761s
     CGroup: /user.slice/user-42477.slice/user@42477.service
             └─init.scope
               ├─76634 /usr/lib/systemd/systemd --user
               └─76636 "(sd-pam)"

Dec 02 10:49:32 compute-0 systemd[76634]: Finished Create User's Volatile Files and Directories.
Dec 02 10:49:32 compute-0 systemd[76634]: Reached target Basic System.
Dec 02 10:49:32 compute-0 systemd[76634]: Reached target Main User Target.
Dec 02 10:49:32 compute-0 systemd[76634]: Startup finished in 131ms.
Dec 02 10:49:32 compute-0 systemd[1]: Started User Manager for UID 42477.
Dec 02 10:51:32 compute-0 systemd[76634]: Starting Mark boot as successful...
Dec 02 10:51:32 compute-0 systemd[76634]: Finished Mark boot as successful.
Dec 02 10:55:11 compute-0 systemd[76634]: Created slice User Background Tasks Slice.
Dec 02 10:55:11 compute-0 systemd[76634]: Starting Cleanup of User's Temporary Files and Directories...
Dec 02 10:55:11 compute-0 systemd[76634]: Finished Cleanup of User's Temporary Files and Directories.

○ virtinterfaced.service - libvirt interface daemon
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtinterfaced-ro.socket
             ○ virtinterfaced-admin.socket
             ○ virtinterfaced.socket
       Docs: man:virtinterfaced(8)
             https://libvirt.org/

○ virtlockd.service - libvirt locking daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ● virtlockd.socket
             ○ virtlockd-admin.socket
       Docs: man:virtlockd(8)
             https://libvirt.org/

● virtlogd.service - libvirt logging daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-02 11:05:22 UTC; 33min ago
TriggeredBy: ● virtlogd-admin.socket
             ● virtlogd.socket
       Docs: man:virtlogd(8)
             https://libvirt.org/
   Main PID: 215589 (virtlogd)
         IO: 644.0K read, 2.6M written
      Tasks: 1 (limit: 48628)
     Memory: 3.6M (peak: 4.1M)
        CPU: 45.793s
     CGroup: /system.slice/virtlogd.service
             └─215589 /usr/sbin/virtlogd

Dec 02 11:05:21 compute-0 systemd[1]: Starting libvirt logging daemon...
Dec 02 11:05:22 compute-0 systemd[1]: Started libvirt logging daemon.

○ virtnetworkd.service - libvirt network daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnetworkd.socket
             ○ virtnetworkd-admin.socket
             ○ virtnetworkd-ro.socket
       Docs: man:virtnetworkd(8)
             https://libvirt.org/

● virtnodedevd.service - libvirt nodedev daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-02 11:09:30 UTC; 29min ago
TriggeredBy: ● virtnodedevd-ro.socket
             ● virtnodedevd.socket
             ● virtnodedevd-admin.socket
       Docs: man:virtnodedevd(8)
             https://libvirt.org/
   Main PID: 255222 (virtnodedevd)
         IO: 4.1M read, 0B written
      Tasks: 20 (limit: 48628)
     Memory: 10.0M (peak: 11.0M)
        CPU: 3.533s
     CGroup: /system.slice/virtnodedevd.service
             └─255222 /usr/sbin/virtnodedevd --timeout 120

Dec 02 11:09:30 compute-0 systemd[1]: Starting libvirt nodedev daemon...
Dec 02 11:09:30 compute-0 systemd[1]: Started libvirt nodedev daemon.

○ virtnwfilterd.service - libvirt nwfilter daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnwfilterd.socket
             ○ virtnwfilterd-ro.socket
             ○ virtnwfilterd-admin.socket
       Docs: man:virtnwfilterd(8)
             https://libvirt.org/

○ virtproxyd.service - libvirt proxy daemon
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.service; enabled; preset: disabled)
     Active: inactive (dead) since Tue 2025-12-02 11:07:24 UTC; 31min ago
   Duration: 2min 40ms
TriggeredBy: ● virtproxyd-admin.socket
             ● virtproxyd-tls.socket
             ● virtproxyd.socket
             ● virtproxyd-ro.socket
       Docs: man:virtproxyd(8)
             https://libvirt.org/
   Main PID: 216011 (code=exited, status=0/SUCCESS)
        CPU: 63ms

Dec 02 11:05:24 compute-0 systemd[1]: Starting libvirt proxy daemon...
Dec 02 11:05:24 compute-0 systemd[1]: Started libvirt proxy daemon.
Dec 02 11:07:24 compute-0 systemd[1]: virtproxyd.service: Deactivated successfully.

● virtqemud.service - libvirt QEMU daemon
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-02 11:09:23 UTC; 29min ago
TriggeredBy: ● virtqemud-ro.socket
             ● virtqemud.socket
             ● virtqemud-admin.socket
       Docs: man:virtqemud(8)
             https://libvirt.org/
   Main PID: 254597 (virtqemud)
Unit ypbind.service could not be found.
Unit yppasswdd.service could not be found.
Unit ypserv.service could not be found.
Unit ypxfrd.service could not be found.
          IO: 45.7M read, 1.3M written
      Tasks: 19 (limit: 32768)
     Memory: 67.7M (peak: 86.3M)
        CPU: 11.022s
     CGroup: /system.slice/virtqemud.service
             └─254597 /usr/sbin/virtqemud --timeout 120

Dec 02 11:09:23 compute-0 systemd[1]: Starting libvirt QEMU daemon...
Dec 02 11:09:23 compute-0 systemd[1]: Started libvirt QEMU daemon.
Dec 02 11:09:25 compute-0 virtqemud[254597]: libvirt version: 11.9.0, package: 1.el9 (builder@centos.org, 2025-11-04-09:54:50, )
Dec 02 11:09:25 compute-0 virtqemud[254597]: hostname: compute-0
Dec 02 11:09:25 compute-0 virtqemud[254597]: End of file while reading data: Input/output error
Dec 02 11:38:34 compute-0 virtqemud[254597]: Failed to connect socket to '/var/run/libvirt/virtnetworkd-sock-ro': No such file or directory
Dec 02 11:38:35 compute-0 virtqemud[254597]: Failed to connect socket to '/var/run/libvirt/virtnwfilterd-sock-ro': No such file or directory
Dec 02 11:38:35 compute-0 virtqemud[254597]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory
Dec 02 11:39:13 compute-0 virtqemud[254597]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory

● virtsecretd.service - libvirt secret daemon
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-02 11:16:11 UTC; 23min ago
TriggeredBy: ● virtsecretd-admin.socket
             ● virtsecretd-ro.socket
             ● virtsecretd.socket
       Docs: man:virtsecretd(8)
             https://libvirt.org/
   Main PID: 262209 (virtsecretd)
         IO: 8.0K read, 170.0K written
      Tasks: 18 (limit: 48628)
     Memory: 4.1M (peak: 5.1M)
        CPU: 560ms
     CGroup: /system.slice/virtsecretd.service
             └─262209 /usr/sbin/virtsecretd --timeout 120

Dec 02 11:16:11 compute-0 systemd[1]: Starting libvirt secret daemon...
Dec 02 11:16:11 compute-0 systemd[1]: Started libvirt secret daemon.

○ virtstoraged.service - libvirt storage daemon
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtstoraged-admin.socket
             ○ virtstoraged.socket
             ○ virtstoraged-ro.socket
       Docs: man:virtstoraged(8)
             https://libvirt.org/

● -.slice - Root Slice
     Loaded: loaded
     Active: active since Tue 2025-12-02 10:03:29 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:29 UTC; 1h 35min ago
       Docs: man:systemd.special(7)
      Tasks: 1415
     Memory: 3.4G
        CPU: 59min 9.880s
     CGroup: /
             ├─307236 turbostat --debug sleep 10
             ├─307251 sleep 10
             ├─init.scope
             │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
             ├─machine.slice
             │ ├─libpod-130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193.scope
             │ │ └─container
             │ │   ├─236124 dumb-init --single-child -- kolla_start
             │ │   └─236127 /usr/sbin/multipathd -d
             │ ├─libpod-301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193.scope
             │ │ └─container
             │ │   ├─163736 dumb-init --single-child -- kolla_start
             │ │   ├─163757 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │ │   ├─163893 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │ │   ├─164036 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp4q14yauh/privsep.sock
             │ │   ├─262398 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpti_0kgk9/privsep.sock
             │ │   └─262581 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpokt26739/privsep.sock
             │ ├─libpod-5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998.scope
             │ │ └─container
             │ │   ├─153855 dumb-init --single-child -- kolla_start
             │ │   └─153864 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
             │ └─libpod-5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad.scope
             │   └─container
             │     ├─254902 dumb-init --single-child -- kolla_start
             │     ├─254904 /usr/bin/python3 /usr/bin/nova-compute
             │     ├─262068 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmp1no9jew6/privsep.sock
             │     ├─262759 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmpoqbx9wo3/privsep.sock
             │     └─262878 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmp8u10dppg/privsep.sock
             ├─system.slice
             │ ├─NetworkManager.service
             │ │ └─48987 /usr/sbin/NetworkManager --no-daemon
             │ ├─auditd.service
             │ │ ├─702 /sbin/auditd
             │ │ └─704 /usr/sbin/sedispatch
             │ ├─chronyd.service
             │ │ └─58549 /usr/sbin/chronyd -F 2
             │ ├─crond.service
             │ │ ├─  1009 /usr/sbin/crond -n
             │ │ └─162071 /usr/sbin/anacron -s
             │ ├─dbus-broker.service
             │ │ ├─756 /usr/bin/dbus-broker-launch --scope system --audit
             │ │ └─773 dbus-broker --log 4 --controller 9 --machine-id 1f988c78c563e12389ab342aced42dbb --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             │ ├─edpm_multipathd.service
             │ │ └─236122 /usr/bin/conmon --api-version 1 -c 130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193 -u 130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193/userdata -p /run/containers/storage/overlay-containers/130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193/userdata/pidfile -n multipathd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193/userdata/oci-log --conmon-pidfile /run/multipathd.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193
             │ ├─edpm_nova_compute.service
             │ │ └─254900 /usr/bin/conmon --api-version 1 -c 5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad -u 5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad/userdata -p /run/containers/storage/overlay-containers/5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad
             │ ├─edpm_ovn_controller.service
             │ │ └─153849 /usr/bin/conmon --api-version 1 -c 5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998 -u 5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998/userdata -p /run/containers/storage/overlay-containers/5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998
             │ ├─edpm_ovn_metadata_agent.service
             │ │ └─163733 /usr/bin/conmon --api-version 1 -c 301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193 -u 301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193/userdata -p /run/containers/storage/overlay-containers/301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193
             │ ├─gssproxy.service
             │ │ └─870 /usr/sbin/gssproxy -D
             │ ├─irqbalance.service
             │ │ └─782 /usr/sbin/irqbalance
             │ ├─iscsid.service
             │ │ └─238852 /usr/sbin/iscsid -f
             │ ├─ovs-vswitchd.service
             │ │ └─47291 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             │ ├─ovsdb-server.service
             │ │ └─47210 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             │ ├─polkit.service
             │ │ └─43476 /usr/lib/polkit-1/polkitd --no-debug
             │ ├─rpcbind.service
             │ │ └─700 /usr/bin/rpcbind -w -f
             │ ├─rsyslog.service
             │ │ └─1005 /usr/sbin/rsyslogd -n
             │ ├─sshd.service
             │ │ └─190649 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             │ ├─system-ceph\x2d95bc4eaa\x2d1a14\x2d59bf\x2dacf2\x2d4b3da055547d.slice
             │ │ ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@crash.compute-0.service
             │ │ │ ├─libpod-payload-3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b
             │ │ │ │ ├─83162 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ │ └─83164 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ └─runtime
             │ │ │   └─83160 /usr/bin/conmon --api-version 1 -c 3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b -u 3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b/userdata -p /run/containers/storage/overlay-containers/3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b
             │ │ ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mds.cephfs.compute-0.bydekr.service
             │ │ │ ├─libpod-payload-f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c
             │ │ │ │ ├─101612 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.bydekr -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─101614 /usr/bin/ceph-mds -n mds.cephfs.compute-0.bydekr -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─101610 /usr/bin/conmon --api-version 1 -c f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c -u f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c/userdata -p /run/containers/storage/overlay-containers/f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-mds-cephfs-compute-0-bydekr --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mds.cephfs.compute-0.bydekr.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c
             │ │ ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mgr.compute-0.ntxcvs.service
             │ │ │ ├─libpod-payload-e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093
             │ │ │ │ ├─75370 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.ntxcvs -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─75372 /usr/bin/ceph-mgr -n mgr.compute-0.ntxcvs -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─75368 /usr/bin/conmon --api-version 1 -c e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093 -u e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093/userdata -p /run/containers/storage/overlay-containers/e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-mgr-compute-0-ntxcvs --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mgr.compute-0.ntxcvs.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093
             │ │ ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mon.compute-0.service
             │ │ │ ├─libpod-payload-cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b
             │ │ │ │ ├─75079 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ │ └─75081 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─75077 /usr/bin/conmon --api-version 1 -c cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b -u cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b/userdata -p /run/containers/storage/overlay-containers/cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b
             │ │ ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.0.service
             │ │ │ ├─libpod-payload-13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222
             │ │ │ │ ├─88959 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─88961 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─88957 /usr/bin/conmon --api-version 1 -c 13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222 -u 13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222/userdata -p /run/containers/storage/overlay-containers/13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222
             │ │ ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.1.service
             │ │ │ ├─libpod-payload-2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db
             │ │ │ │ ├─89964 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─89966 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─89962 /usr/bin/conmon --api-version 1 -c 2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db -u 2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db/userdata -p /run/containers/storage/overlay-containers/2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db
             │ │ ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.2.service
             │ │ │ ├─libpod-payload-227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5
             │ │ │ │ ├─91052 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─91055 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─91050 /usr/bin/conmon --api-version 1 -c 227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5 -u 227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5/userdata -p /run/containers/storage/overlay-containers/227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5
             │ │ └─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@rgw.rgw.compute-0.ssuoka.service
             │ │   ├─libpod-payload-ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b
             │ │   │ ├─101149 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.ssuoka -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │   │ └─101151 /usr/bin/radosgw -n client.rgw.rgw.compute-0.ssuoka -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │   └─runtime
             │ │     └─101147 /usr/bin/conmon --api-version 1 -c ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b -u ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b/userdata -p /run/containers/storage/overlay-containers/ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-rgw-rgw-compute-0-ssuoka --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@rgw.rgw.compute-0.ssuoka.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b
             │ ├─system-getty.slice
             │ │ └─getty@tty1.service
             │ │   └─1010 /sbin/agetty -o "-p -- \\u" --noclear - linux
             │ ├─system-serial\x2dgetty.slice
             │ │ └─serial-getty@ttyS0.service
             │ │   └─1011 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             │ ├─systemd-hostnamed.service
             │ │ └─304498 /usr/lib/systemd/systemd-hostnamed
             │ ├─systemd-journald.service
             │ │ └─679 /usr/lib/systemd/systemd-journald
             │ ├─systemd-logind.service
             │ │ └─787 /usr/lib/systemd/systemd-logind
             │ ├─systemd-machined.service
             │ │ └─216222 /usr/lib/systemd/systemd-machined
             │ ├─systemd-udevd.service
             │ │ └─udev
             │ │   └─730 /usr/lib/systemd/systemd-udevd
             │ ├─tuned.service
             │ │ └─113067 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             │ ├─virtlogd.service
             │ │ └─215589 /usr/sbin/virtlogd
             │ ├─virtnodedevd.service
             │ │ └─255222 /usr/sbin/virtnodedevd --timeout 120
             │ ├─virtqemud.service
             │ │ └─254597 /usr/sbin/virtqemud --timeout 120
             │ └─virtsecretd.service
             │   └─262209 /usr/sbin/virtsecretd --timeout 120
             └─user.slice
               ├─user-1000.slice
               │ ├─session-1.scope
               │ │ └─4520 /usr/bin/python3
               │ ├─session-52.scope
               │ │ ├─301119 "sshd-session: zuul [priv]"
               │ │ ├─301122 "sshd-session: zuul@notty"
               │ │ ├─301123 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
               │ │ ├─301147 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
               │ │ ├─307235 timeout 15s turbostat --debug sleep 10
               │ │ ├─307887 timeout 300s systemctl status --all
               │ │ └─307888 systemctl status --all
               │ └─user@1000.service
               │   ├─app.slice
               │   │ └─dbus-broker.service
               │   │   ├─14561 /usr/bin/dbus-broker-launch --scope user
               │   │   └─14577 dbus-broker --log 4 --controller 9 --machine-id 1f988c78c563e12389ab342aced42dbb --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               │   ├─init.scope
               │   │ ├─4310 /usr/lib/systemd/systemd --user
               │   │ └─4312 "(sd-pam)"
               │   └─user.slice
               │     └─podman-pause-2aa5b999.scope
               │       └─14491 catatonit -P
               └─user-42477.slice
                 ├─session-21.scope
                 │ ├─76630 "sshd-session: ceph-admin [priv]"
                 │ └─76652 "sshd-session: ceph-admin"
                 ├─session-23.scope
                 │ ├─76647 "sshd-session: ceph-admin [priv]"
                 │ └─76653 "sshd-session: ceph-admin@notty"
                 ├─session-24.scope
                 │ ├─76704 "sshd-session: ceph-admin [priv]"
                 │ └─76707 "sshd-session: ceph-admin@notty"
                 ├─session-25.scope
                 │ ├─76758 "sshd-session: ceph-admin [priv]"
                 │ └─76761 "sshd-session: ceph-admin@notty"
                 ├─session-26.scope
                 │ ├─76812 "sshd-session: ceph-admin [priv]"
                 │ └─76816 "sshd-session: ceph-admin@notty"
                 ├─session-27.scope
                 │ ├─76867 "sshd-session: ceph-admin [priv]"
                 │ └─76870 "sshd-session: ceph-admin@notty"
                 ├─session-28.scope
                 │ ├─76921 "sshd-session: ceph-admin [priv]"
                 │ └─76924 "sshd-session: ceph-admin@notty"
                 ├─session-29.scope
                 │ ├─76975 "sshd-session: ceph-admin [priv]"
                 │ └─76978 "sshd-session: ceph-admin@notty"
                 ├─session-30.scope
                 │ ├─77029 "sshd-session: ceph-admin [priv]"
                 │ └─77032 "sshd-session: ceph-admin@notty"
                 ├─session-31.scope
                 │ ├─77083 "sshd-session: ceph-admin [priv]"
                 │ └─77086 "sshd-session: ceph-admin@notty"
                 ├─session-32.scope
                 │ ├─77110 "sshd-session: ceph-admin [priv]"
                 │ └─77113 "sshd-session: ceph-admin@notty"
                 ├─session-33.scope
                 │ ├─77164 "sshd-session: ceph-admin [priv]"
                 │ └─77167 "sshd-session: ceph-admin@notty"
                 └─user@42477.service
                   └─init.scope
                     ├─76634 /usr/lib/systemd/systemd --user
                     └─76636 "(sd-pam)"

Dec 02 11:38:16 compute-0 systemd[1]: libpod-conmon-cb488b55b1b61f85d1a62219d00cc34dc0b80dea28af3b9bbdc72d51097e7239.scope: Deactivated successfully.
Dec 02 11:38:16 compute-0 systemd[1]: Started libpod-conmon-c00ce674ac8e3776d6d6b71f1a07e4148b4ecc2421b1cc3c193f03c4617a4dc2.scope.
Dec 02 11:38:16 compute-0 systemd[1]: Started libcrun container.
Dec 02 11:38:18 compute-0 systemd[1]: libpod-c00ce674ac8e3776d6d6b71f1a07e4148b4ecc2421b1cc3c193f03c4617a4dc2.scope: Deactivated successfully.
Dec 02 11:38:18 compute-0 systemd[1]: libpod-c00ce674ac8e3776d6d6b71f1a07e4148b4ecc2421b1cc3c193f03c4617a4dc2.scope: Consumed 1.188s CPU time.
Dec 02 11:38:18 compute-0 systemd[1]: var-lib-containers-storage-overlay-a1778cd07b89f3efdacb6bebb5be0e5156ee3680241594b00541e30da33de4c2-merged.mount: Deactivated successfully.
Dec 02 11:38:18 compute-0 systemd[1]: libpod-conmon-c00ce674ac8e3776d6d6b71f1a07e4148b4ecc2421b1cc3c193f03c4617a4dc2.scope: Deactivated successfully.
Dec 02 11:38:22 compute-0 systemd[1]: Started Session 52 of User zuul.
Dec 02 11:38:55 compute-0 systemd[1]: Starting Hostname Service...
Dec 02 11:38:55 compute-0 systemd[1]: Started Hostname Service.

● machine.slice - Virtual Machine and Container Slice
     Loaded: loaded (/usr/lib/systemd/system/machine.slice; static)
     Active: active since Tue 2025-12-02 10:48:31 UTC; 50min ago
      Until: Tue 2025-12-02 10:48:31 UTC; 50min ago
       Docs: man:systemd.special(7)
         IO: 383.7M read, 91.5M written
      Tasks: 54
     Memory: 998.9M (peak: 1.6G)
        CPU: 14min 50.609s
     CGroup: /machine.slice
             ├─libpod-130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193.scope
             │ └─container
             │   ├─236124 dumb-init --single-child -- kolla_start
             │   └─236127 /usr/sbin/multipathd -d
             ├─libpod-301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193.scope
             │ └─container
             │   ├─163736 dumb-init --single-child -- kolla_start
             │   ├─163757 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │   ├─163893 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │   ├─164036 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp4q14yauh/privsep.sock
             │   ├─262398 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpti_0kgk9/privsep.sock
             │   └─262581 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpokt26739/privsep.sock
             ├─libpod-5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998.scope
             │ └─container
             │   ├─153855 dumb-init --single-child -- kolla_start
             │   └─153864 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
             └─libpod-5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad.scope
               └─container
                 ├─254902 dumb-init --single-child -- kolla_start
                 ├─254904 /usr/bin/python3 /usr/bin/nova-compute
                 ├─262068 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmp1no9jew6/privsep.sock
                 ├─262759 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmpoqbx9wo3/privsep.sock
                 └─262878 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmp8u10dppg/privsep.sock

Dec 02 11:38:18 compute-0 quizzical_solomon[301021]:         "type": "bluestore"
Dec 02 11:38:18 compute-0 quizzical_solomon[301021]:     },
Dec 02 11:38:18 compute-0 quizzical_solomon[301021]:     "cb22d311-a01e-4327-afb4-565a5b394930": {
Dec 02 11:38:18 compute-0 quizzical_solomon[301021]:         "ceph_fsid": "95bc4eaa-1a14-59bf-acf2-4b3da055547d",
Dec 02 11:38:18 compute-0 quizzical_solomon[301021]:         "device": "/dev/mapper/ceph_vg1-ceph_lv1",
Dec 02 11:38:18 compute-0 quizzical_solomon[301021]:         "osd_id": 1,
Dec 02 11:38:18 compute-0 quizzical_solomon[301021]:         "osd_uuid": "cb22d311-a01e-4327-afb4-565a5b394930",
Dec 02 11:38:18 compute-0 quizzical_solomon[301021]:         "type": "bluestore"
Dec 02 11:38:18 compute-0 quizzical_solomon[301021]:     }
Dec 02 11:38:18 compute-0 quizzical_solomon[301021]: }

● system-ceph\x2d95bc4eaa\x2d1a14\x2d59bf\x2dacf2\x2d4b3da055547d.slice - Slice /system/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d
     Loaded: loaded
     Active: active since Tue 2025-12-02 10:48:35 UTC; 50min ago
      Until: Tue 2025-12-02 10:48:35 UTC; 50min ago
         IO: 1.6G read, 25.5G written
      Tasks: 992
     Memory: 3.5G (peak: 4.5G)
        CPU: 5min 39.237s
     CGroup: /system.slice/system-ceph\x2d95bc4eaa\x2d1a14\x2d59bf\x2dacf2\x2d4b3da055547d.slice
             ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@crash.compute-0.service
             │ ├─libpod-payload-3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b
             │ │ ├─83162 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ └─83164 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ └─runtime
             │   └─83160 /usr/bin/conmon --api-version 1 -c 3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b -u 3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b/userdata -p /run/containers/storage/overlay-containers/3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b
             ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mds.cephfs.compute-0.bydekr.service
             │ ├─libpod-payload-f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c
             │ │ ├─101612 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.bydekr -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─101614 /usr/bin/ceph-mds -n mds.cephfs.compute-0.bydekr -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─101610 /usr/bin/conmon --api-version 1 -c f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c -u f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c/userdata -p /run/containers/storage/overlay-containers/f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-mds-cephfs-compute-0-bydekr --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mds.cephfs.compute-0.bydekr.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c
             ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mgr.compute-0.ntxcvs.service
             │ ├─libpod-payload-e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093
             │ │ ├─75370 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.ntxcvs -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─75372 /usr/bin/ceph-mgr -n mgr.compute-0.ntxcvs -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─75368 /usr/bin/conmon --api-version 1 -c e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093 -u e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093/userdata -p /run/containers/storage/overlay-containers/e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-mgr-compute-0-ntxcvs --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mgr.compute-0.ntxcvs.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093
             ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mon.compute-0.service
             │ ├─libpod-payload-cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b
             │ │ ├─75079 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─75081 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─runtime
             │   └─75077 /usr/bin/conmon --api-version 1 -c cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b -u cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b/userdata -p /run/containers/storage/overlay-containers/cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b
             ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.0.service
             │ ├─libpod-payload-13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222
             │ │ ├─88959 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─88961 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─88957 /usr/bin/conmon --api-version 1 -c 13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222 -u 13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222/userdata -p /run/containers/storage/overlay-containers/13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222
             ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.1.service
             │ ├─libpod-payload-2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db
             │ │ ├─89964 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─89966 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─89962 /usr/bin/conmon --api-version 1 -c 2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db -u 2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db/userdata -p /run/containers/storage/overlay-containers/2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db
             ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.2.service
             │ ├─libpod-payload-227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5
             │ │ ├─91052 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─91055 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─91050 /usr/bin/conmon --api-version 1 -c 227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5 -u 227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5/userdata -p /run/containers/storage/overlay-containers/227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5
             └─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@rgw.rgw.compute-0.ssuoka.service
               ├─libpod-payload-ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b
               │ ├─101149 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.ssuoka -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
               │ └─101151 /usr/bin/radosgw -n client.rgw.rgw.compute-0.ssuoka -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
               └─runtime
                 └─101147 /usr/bin/conmon --api-version 1 -c ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b -u ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b/userdata -p /run/containers/storage/overlay-containers/ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-rgw-rgw-compute-0-ssuoka --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@rgw.rgw.compute-0.ssuoka.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b

Dec 02 11:39:11 compute-0 ceph-mon[75081]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "status", "format": "json-pretty"} v 0) v1
Dec 02 11:39:11 compute-0 ceph-mon[75081]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/424382596' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Dec 02 11:39:11 compute-0 ceph-mon[75081]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "time-sync-status", "format": "json-pretty"} v 0) v1
Dec 02 11:39:11 compute-0 ceph-mon[75081]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/858553117' entity='client.admin' cmd=[{"prefix": "time-sync-status", "format": "json-pretty"}]: dispatch
Dec 02 11:39:12 compute-0 ceph-mon[75081]: from='client.19421 -' entity='client.admin' cmd=[{"prefix": "pg stat", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Dec 02 11:39:12 compute-0 ceph-mon[75081]: from='client.? 192.168.122.100:0/424382596' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Dec 02 11:39:12 compute-0 ceph-mon[75081]: from='client.? 192.168.122.100:0/858553117' entity='client.admin' cmd=[{"prefix": "time-sync-status", "format": "json-pretty"}]: dispatch
Dec 02 11:39:12 compute-0 ceph-mgr[75372]: log_channel(cluster) log [DBG] : pgmap v1991: 321 pgs: 321 active+clean; 271 MiB data, 693 MiB used, 59 GiB / 60 GiB avail
Dec 02 11:39:13 compute-0 ceph-mon[75081]: pgmap v1991: 321 pgs: 321 active+clean; 271 MiB data, 693 MiB used, 59 GiB / 60 GiB avail
Dec 02 11:39:14 compute-0 ceph-mon[75081]: mon.compute-0@0(leader).osd e508 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 343932928 full_alloc: 348127232 kv_alloc: 318767104

● system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice - Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged
     Loaded: loaded
     Active: active since Tue 2025-12-02 11:05:23 UTC; 33min ago
      Until: Tue 2025-12-02 11:05:23 UTC; 33min ago
         IO: 4.0K read, 0B written
      Tasks: 0
     Memory: 4.0K (peak: 58.0M)
        CPU: 921ms
     CGroup: /system.slice/system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice

Dec 02 11:05:23 compute-0 systemd[1]: Created slice Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged.

● system-getty.slice - Slice /system/getty
     Loaded: loaded
     Active: active since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 240.0K (peak: 460.0K)
        CPU: 9ms
     CGroup: /system.slice/system-getty.slice
             └─getty@tty1.service
               └─1010 /sbin/agetty -o "-p -- \\u" --noclear - linux

● system-modprobe.slice - Slice /system/modprobe
     Loaded: loaded
     Active: active since Tue 2025-12-02 10:03:29 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:29 UTC; 1h 35min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 52.0K (peak: 11.5M)
        CPU: 161ms
     CGroup: /system.slice/system-modprobe.slice

Dec 02 10:03:29 localhost systemd[1]: Created slice Slice /system/modprobe.

● system-serial\x2dgetty.slice - Slice /system/serial-getty
     Loaded: loaded
     Active: active since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 312.0K (peak: 556.0K)
        CPU: 8ms
     CGroup: /system.slice/system-serial\x2dgetty.slice
             └─serial-getty@ttyS0.service
               └─1011 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

● system-sshd\x2dkeygen.slice - Slice /system/sshd-keygen
     Loaded: loaded
     Active: active since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 0B (peak: 0B)
        CPU: 0
     CGroup: /system.slice/system-sshd\x2dkeygen.slice

● system-systemd\x2dcoredump.slice - Slice /system/systemd-coredump
     Loaded: loaded
     Active: active since Tue 2025-12-02 11:16:40 UTC; 22min ago
      Until: Tue 2025-12-02 11:16:40 UTC; 22min ago
         IO: 4.2M read, 1.0M written
      Tasks: 0
     Memory: 4.2M (peak: 292.7M)
        CPU: 1.026s
     CGroup: /system.slice/system-systemd\x2dcoredump.slice

Dec 02 11:16:40 compute-0 systemd[1]: Created slice Slice /system/systemd-coredump.
Dec 02 11:16:41 compute-0 systemd-coredump[262900]: Process 262880 (qemu-img) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 262890:
                                                    #0  0x00007f778ae1803c __pthread_kill_implementation (libc.so.6 + 0x8d03c)
                                                    #1  0x00007f778adcab86 raise (libc.so.6 + 0x3fb86)
                                                    #2  0x00007f778adb4873 abort (libc.so.6 + 0x29873)
                                                    #3  0x0000557cd48e85df ___interceptor_pthread_create (qemu-img + 0x4f5df)
                                                    #4  0x00007f7783928ff4 _ZN6Thread10try_createEm (libceph-common.so.2 + 0x258ff4)
                                                    #5  0x00007f778392b6ae _ZN6Thread6createEPKcm (libceph-common.so.2 + 0x25b6ae)
                                                    #6  0x00007f7788ef826b _ZNSt8_Rb_treeISt4pairINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESt10type_indexES0_IKS8_N4ceph12immobile_anyILm576EEEESt10_Select1stISD_ENSA_6common11CephContext19associated_objs_cmpESaISD_EE22_M_emplace_hint_uniqueIJRKSt21piecewise_construct_tSt5tupleIJRSt17basic_string_viewIcS4_ERS7_EESP_IJRKSt15in_place_type_tIN6librbd21TaskFinisherSingletonEERPSH_EEEEESt17_Rb_tree_iteratorISD_ESt23_Rb_tree_const_iteratorISD_EDpOT_.constprop.0 (librbd.so.1 + 0x51126b)
                                                    #7  0x00007f7788b257a6 _ZN6librbd8ImageCtx4initEv (librbd.so.1 + 0x13e7a6)
                                                    #8  0x00007f7788bff2d3 _ZN6librbd5image11OpenRequestINS_8ImageCtxEE12send_refreshEv (librbd.so.1 + 0x2182d3)
                                                    #9  0x00007f7788bfff46 _ZN6librbd5image11OpenRequestINS_8ImageCtxEE23handle_v2_get_data_poolEPi (librbd.so.1 + 0x218f46)
                                                    #10 0x00007f7788c002a7 _ZN6librbd4util6detail20rados_state_callbackINS_5image11OpenRequestINS_8ImageCtxEEEXadL_ZNS6_23handle_v2_get_data_poolEPiEELb1EEEvPvS8_ (librbd.so.1 + 0x2192a7)
                                                    #11 0x00007f77888fe0ac _ZN5boost4asio6detail18completion_handlerINS1_7binder0IN8librados14CB_AioCompleteEEENS0_10io_context19basic_executor_typeISaIvELm0EEEE11do_completeEPvPNS1_19scheduler_operationERKNS_6system10error_codeEm (librados.so.2 + 0xad0ac)
                                                    #12 0x00007f77888fd585 _ZN5boost4asio6detail14strand_service11do_completeEPvPNS1_19scheduler_operationERKNS_6system10error_codeEm (librados.so.2 + 0xac585)
                                                    #13 0x00007f7788978498 _ZN5boost4asio6detail9scheduler3runERNS_6system10error_codeE.constprop.0.isra.0 (librados.so.2 + 0x127498)
                                                    #14 0x00007f77889174e4 _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJZ17make_named_threadIZN4ceph5async15io_context_pool5startEsEUlvE_JEES_St17basic_string_viewIcSt11char_traitsIcEEOT_DpOT0_EUlSD_SG_E_S7_EEEEE6_M_runEv (librados.so.2 + 0xc64e4)
                                                    #15 0x00007f7783582ae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #16 0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #17 0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262880:
                                                    #0  0x00007f778ae1338a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f778ae158e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f778357c6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f7788b2ceb3 _ZN6librbd10ImageStateINS_8ImageCtxEE4openEm (librbd.so.1 + 0x145eb3)
                                                    #4  0x00007f7788afcfcb rbd_open (librbd.so.1 + 0x115fcb)
                                                    #5  0x00007f77890a789d qemu_rbd_open (block-rbd.so + 0x489d)
                                                    #6  0x0000557cd48f925c bdrv_open_driver.llvm.1535778247189356743 (qemu-img + 0x6025c)
                                                    #7  0x0000557cd48fe4b7 bdrv_open_inherit.llvm.1535778247189356743 (qemu-img + 0x654b7)
                                                    #8  0x0000557cd490bde1 bdrv_open_child_bs.llvm.1535778247189356743 (qemu-img + 0x72de1)
                                                    #9  0x0000557cd48fdc36 bdrv_open_inherit.llvm.1535778247189356743 (qemu-img + 0x64c36)
                                                    #10 0x0000557cd492d4b3 blk_new_open (qemu-img + 0x944b3)
                                                    #11 0x0000557cd49ed516 img_open_file (qemu-img + 0x154516)
                                                    #12 0x0000557cd49ed0c0 img_open (qemu-img + 0x1540c0)
                                                    #13 0x0000557cd49e903b img_info (qemu-img + 0x15003b)
                                                    #14 0x0000557cd49e26ca main (qemu-img + 0x1496ca)
                                                    #15 0x00007f778adb5610 __libc_start_call_main (libc.so.6 + 0x2a610)
                                                    #16 0x00007f778adb56c0 __libc_start_main@@GLIBC_2.34 (libc.so.6 + 0x2a6c0)
                                                    #17 0x0000557cd48e8285 _start (qemu-img + 0x4f285)
                                                    
                                                    Stack trace of thread 262882:
                                                    #0  0x00007f778ae1338a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f778ae158e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f778357c6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f7783b3b0a2 _ZN4ceph7logging3Log5entryEv (libceph-common.so.2 + 0x46b0a2)
                                                    #4  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #5  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262898:
                                                    #0  0x00007f778ae242a2 sysmalloc (libc.so.6 + 0x992a2)
                                                    #1  0x00007f778ae24e67 _int_malloc (libc.so.6 + 0x99e67)
                                                    #2  0x00007f778ae25821 tcache_init.part.0 (libc.so.6 + 0x9a821)
                                                    #3  0x00007f778ae25f7e __libc_malloc (libc.so.6 + 0x9af7e)
                                                    #4  0x00007f778b521e7e malloc (ld-linux-x86-64.so.2 + 0x12e7e)
                                                    #5  0x00007f778b525eec __tls_get_addr (ld-linux-x86-64.so.2 + 0x16eec)
                                                    #6  0x00007f778396c5a4 ceph_pthread_setname (libceph-common.so.2 + 0x29c5a4)
                                                    #7  0x00007f7783928f38 _ZN6Thread13entry_wrapperEv (libceph-common.so.2 + 0x258f38)
                                                    #8  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #9  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262881:
                                                    #0  0x00007f778ae9382d syscall (libc.so.6 + 0x10882d)
                                                    #1  0x0000557cd4a73193 qemu_event_wait (qemu-img + 0x1da193)
                                                    #2  0x0000557cd4a7e2e7 call_rcu_thread (qemu-img + 0x1e52e7)
                                                    #3  0x0000557cd4a712aa qemu_thread_start.llvm.12875871551448449403 (qemu-img + 0x1d82aa)
                                                    #4  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #5  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262897:
                                                    #0  0x00007f778ae1338a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f778ae158e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f778357c6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f778392e7f8 _ZN15CommonSafeTimerISt5mutexE12timer_threadEv (libceph-common.so.2 + 0x25e7f8)
                                                    #4  0x00007f778392ef81 _ZN21CommonSafeTimerThreadISt5mutexE5entryEv (libceph-common.so.2 + 0x25ef81)
                                                    #5  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262883:
                                                    #0  0x00007f778ae9aa3e epoll_wait (libc.so.6 + 0x10fa3e)
                                                    #1  0x00007f7783b10618 _ZN11EpollDriver10event_waitERSt6vectorI14FiredFileEventSaIS1_EEP7timeval (libceph-common.so.2 + 0x440618)
                                                    #2  0x00007f7783b0e702 _ZN11EventCenter14process_eventsEjPNSt6chrono8durationImSt5ratioILl1ELl1000000000EEEE (libceph-common.so.2 + 0x43e702)
                                                    #3  0x00007f7783b0f2c6 _ZNSt17_Function_handlerIFvvEZN12NetworkStack10add_threadEP6WorkerEUlvE_E9_M_invokeERKSt9_Any_data (libceph-common.so.2 + 0x43f2c6)
                                                    #4  0x00007f7783582ae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #5  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262892:
                                                    #0  0x00007f778ae1338a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f778ae15cc0 pthread_cond_clockwait@GLIBC_2.30 (libc.so.6 + 0x8acc0)
                                                    #2  0x00007f7788950364 _ZN4ceph5timerINS_17coarse_mono_clockEE12timer_threadEv (librados.so.2 + 0xff364)
                                                    #3  0x00007f7783582ae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #4  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #5  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262889:
                                                    #0  0x00007f778ae1338a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f778ae15cc0 pthread_cond_clockwait@GLIBC_2.30 (libc.so.6 + 0x8acc0)
                                                    #2  0x00007f7783949150 _ZN4ceph6common24CephContextServiceThread5entryEv (libceph-common.so.2 + 0x279150)
                                                    #3  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #4  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262891:
                                                    #0  0x00007f778ae1338a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f778ae158e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f7788978266 _ZN5boost4asio6detail9scheduler3runERNS_6system10error_codeE.constprop.0.isra.0 (librados.so.2 + 0x127266)
                                                    #3  0x00007f77889174e4 _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJZ17make_named_threadIZN4ceph5async15io_context_pool5startEsEUlvE_JEES_St17basic_string_viewIcSt11char_traitsIcEEOT_DpOT0_EUlSD_SG_E_S7_EEEEE6_M_runEv (librados.so.2 + 0xc64e4)
                                                    #4  0x00007f7783582ae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #5  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262896:
                                                    #0  0x00007f778ae1338a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f778ae158e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f778357c6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f778392e7f8 _ZN15CommonSafeTimerISt5mutexE12timer_threadEv (libceph-common.so.2 + 0x25e7f8)
                                                    #4  0x00007f778392ef81 _ZN21CommonSafeTimerThreadISt5mutexE5entryEv (libceph-common.so.2 + 0x25ef81)
                                                    #5  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262893:
                                                    #0  0x00007f778ae1338a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f778ae158e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f778357c6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f7783a3749f _ZN13DispatchQueue5entryEv (libceph-common.so.2 + 0x36749f)
                                                    #4  0x00007f7783ac8411 _ZN13DispatchQueue14DispatchThread5entryEv (libceph-common.so.2 + 0x3f8411)
                                                    #5  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262885:
                                                    #0  0x00007f778ae9aa3e epoll_wait (libc.so.6 + 0x10fa3e)
                                                    #1  0x00007f7783b10618 _ZN11EpollDriver10event_waitERSt6vectorI14FiredFileEventSaIS1_EEP7timeval (libceph-common.so.2 + 0x440618)
                                                    #2  0x00007f7783b0e702 _ZN11EventCenter14process_eventsEjPNSt6chrono8durationImSt5ratioILl1ELl1000000000EEEE (libceph-common.so.2 + 0x43e702)
                                                    #3  0x00007f7783b0f2c6 _ZNSt17_Function_handlerIFvvEZN12NetworkStack10add_threadEP6WorkerEUlvE_E9_M_invokeERKSt9_Any_data (libceph-common.so.2 + 0x43f2c6)
                                                    #4  0x00007f7783582ae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #5  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262894:
                                                    #0  0x00007f778ae1338a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f778ae158e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f778357c6c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f7783a370b9 _ZN13DispatchQueue18run_local_deliveryEv (libceph-common.so.2 + 0x3670b9)
                                                    #4  0x00007f7783ac8431 _ZN13DispatchQueue19LocalDeliveryThread5entryEv (libceph-common.so.2 + 0x3f8431)
                                                    #5  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262895:
                                                    #0  0x00007f778ae1338a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f778ae15cc0 pthread_cond_clockwait@GLIBC_2.30 (libc.so.6 + 0x8acc0)
                                                    #2  0x00007f778392eb23 _ZN15CommonSafeTimerISt5mutexE12timer_threadEv (libceph-common.so.2 + 0x25eb23)
                                                    #3  0x00007f778392ef81 _ZN21CommonSafeTimerThreadISt5mutexE5entryEv (libceph-common.so.2 + 0x25ef81)
                                                    #4  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #5  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    
                                                    Stack trace of thread 262884:
                                                    #0  0x00007f778ae9aa3e epoll_wait (libc.so.6 + 0x10fa3e)
                                                    #1  0x00007f7783b10618 _ZN11EpollDriver10event_waitERSt6vectorI14FiredFileEventSaIS1_EEP7timeval (libceph-common.so.2 + 0x440618)
                                                    #2  0x00007f7783b0e702 _ZN11EventCenter14process_eventsEjPNSt6chrono8durationImSt5ratioILl1ELl1000000000EEEE (libceph-common.so.2 + 0x43e702)
                                                    #3  0x00007f7783b0f2c6 _ZNSt17_Function_handlerIFvvEZN12NetworkStack10add_threadEP6WorkerEUlvE_E9_M_invokeERKSt9_Any_data (libceph-common.so.2 + 0x43f2c6)
                                                    #4  0x00007f7783582ae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #5  0x00007f778ae162fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f778ae9b400 __clone3 (libc.so.6 + 0x110400)
                                                    ELF object binary architecture: AMD x86-64

● system.slice - System Slice
     Loaded: loaded
     Active: active since Tue 2025-12-02 10:03:29 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:29 UTC; 1h 35min ago
       Docs: man:systemd.special(7)
         IO: 1.8G read, 25.6G written
      Tasks: 1117
     Memory: 4.2G (peak: 5.2G)
        CPU: 11min 43.662s
     CGroup: /system.slice
             ├─NetworkManager.service
             │ └─48987 /usr/sbin/NetworkManager --no-daemon
             ├─auditd.service
             │ ├─702 /sbin/auditd
             │ └─704 /usr/sbin/sedispatch
             ├─chronyd.service
             │ └─58549 /usr/sbin/chronyd -F 2
             ├─crond.service
             │ ├─  1009 /usr/sbin/crond -n
             │ └─162071 /usr/sbin/anacron -s
             ├─dbus-broker.service
             │ ├─756 /usr/bin/dbus-broker-launch --scope system --audit
             │ └─773 dbus-broker --log 4 --controller 9 --machine-id 1f988c78c563e12389ab342aced42dbb --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             ├─edpm_multipathd.service
             │ └─236122 /usr/bin/conmon --api-version 1 -c 130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193 -u 130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193/userdata -p /run/containers/storage/overlay-containers/130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193/userdata/pidfile -n multipathd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193/userdata/oci-log --conmon-pidfile /run/multipathd.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193
             ├─edpm_nova_compute.service
             │ └─254900 /usr/bin/conmon --api-version 1 -c 5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad -u 5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad/userdata -p /run/containers/storage/overlay-containers/5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 5fbc74cebe070d8cc77fb5ba95cab60fe5a6a788996a0004958af316ccf471ad
             ├─edpm_ovn_controller.service
             │ └─153849 /usr/bin/conmon --api-version 1 -c 5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998 -u 5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998/userdata -p /run/containers/storage/overlay-containers/5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998
             ├─edpm_ovn_metadata_agent.service
             │ └─163733 /usr/bin/conmon --api-version 1 -c 301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193 -u 301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193/userdata -p /run/containers/storage/overlay-containers/301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193
             ├─gssproxy.service
             │ └─870 /usr/sbin/gssproxy -D
             ├─irqbalance.service
             │ └─782 /usr/sbin/irqbalance
             ├─iscsid.service
             │ └─238852 /usr/sbin/iscsid -f
             ├─ovs-vswitchd.service
             │ └─47291 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             ├─ovsdb-server.service
             │ └─47210 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             ├─polkit.service
             │ └─43476 /usr/lib/polkit-1/polkitd --no-debug
             ├─rpcbind.service
             │ └─700 /usr/bin/rpcbind -w -f
             ├─rsyslog.service
             │ └─1005 /usr/sbin/rsyslogd -n
             ├─sshd.service
             │ └─190649 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             ├─system-ceph\x2d95bc4eaa\x2d1a14\x2d59bf\x2dacf2\x2d4b3da055547d.slice
             │ ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@crash.compute-0.service
             │ │ ├─libpod-payload-3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b
             │ │ │ ├─83162 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ └─83164 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ └─runtime
             │ │   └─83160 /usr/bin/conmon --api-version 1 -c 3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b -u 3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b/userdata -p /run/containers/storage/overlay-containers/3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3db2848945635a23ef05a5bb5103b9327936169af4da62c76a6e841d53c77f4b
             │ ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mds.cephfs.compute-0.bydekr.service
             │ │ ├─libpod-payload-f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c
             │ │ │ ├─101612 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.bydekr -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─101614 /usr/bin/ceph-mds -n mds.cephfs.compute-0.bydekr -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─101610 /usr/bin/conmon --api-version 1 -c f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c -u f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c/userdata -p /run/containers/storage/overlay-containers/f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-mds-cephfs-compute-0-bydekr --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mds.cephfs.compute-0.bydekr.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg f64fbb716bbb10d4999b599073f025f197d0e4bf9a2dfe0b7e86e89fb078831c
             │ ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mgr.compute-0.ntxcvs.service
             │ │ ├─libpod-payload-e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093
             │ │ │ ├─75370 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.ntxcvs -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─75372 /usr/bin/ceph-mgr -n mgr.compute-0.ntxcvs -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─75368 /usr/bin/conmon --api-version 1 -c e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093 -u e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093/userdata -p /run/containers/storage/overlay-containers/e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-mgr-compute-0-ntxcvs --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mgr.compute-0.ntxcvs.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e03605b236b56660f5174f63911714aeac7aabe1e5dbe09eb60fe0086b6a7093
             │ ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mon.compute-0.service
             │ │ ├─libpod-payload-cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b
             │ │ │ ├─75079 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─75081 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─runtime
             │ │   └─75077 /usr/bin/conmon --api-version 1 -c cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b -u cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b/userdata -p /run/containers/storage/overlay-containers/cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg cfead6f8cdae3fb33ff10b470724c55f63ec4997c8e0a95beaf5732ac7b8da1b
             │ ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.0.service
             │ │ ├─libpod-payload-13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222
             │ │ │ ├─88959 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─88961 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─88957 /usr/bin/conmon --api-version 1 -c 13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222 -u 13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222/userdata -p /run/containers/storage/overlay-containers/13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 13b62c875bf29e1fd647b09f26aeb88392fd8e7fd6f27f5d56d59c8433a0b222
             │ ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.1.service
             │ │ ├─libpod-payload-2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db
             │ │ │ ├─89964 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─89966 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─89962 /usr/bin/conmon --api-version 1 -c 2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db -u 2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db/userdata -p /run/containers/storage/overlay-containers/2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2ab4bdfb1336673e273752f56f87fc01cb41214970130231b1c970afc70361db
             │ ├─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.2.service
             │ │ ├─libpod-payload-227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5
             │ │ │ ├─91052 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─91055 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─91050 /usr/bin/conmon --api-version 1 -c 227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5 -u 227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5/userdata -p /run/containers/storage/overlay-containers/227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 227e7141028a66a33b9f9b116360c43ad8422375cf0151e5d0bdde0e35411ed5
             │ └─ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@rgw.rgw.compute-0.ssuoka.service
             │   ├─libpod-payload-ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b
             │   │ ├─101149 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.ssuoka -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │   │ └─101151 /usr/bin/radosgw -n client.rgw.rgw.compute-0.ssuoka -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │   └─runtime
             │     └─101147 /usr/bin/conmon --api-version 1 -c ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b -u ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b/userdata -p /run/containers/storage/overlay-containers/ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b/userdata/pidfile -n ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d-rgw-rgw-compute-0-ssuoka --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b/userdata/oci-log --conmon-pidfile /run/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d@rgw.rgw.compute-0.ssuoka.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ab2d5290b253be90bd5477bcee4a9bca9d7800a28bd9dd88c5ef94245f38483b
             ├─system-getty.slice
             │ └─getty@tty1.service
             │   └─1010 /sbin/agetty -o "-p -- \\u" --noclear - linux
             ├─system-serial\x2dgetty.slice
             │ └─serial-getty@ttyS0.service
             │   └─1011 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             ├─systemd-hostnamed.service
             │ └─304498 /usr/lib/systemd/systemd-hostnamed
             ├─systemd-journald.service
             │ └─679 /usr/lib/systemd/systemd-journald
             ├─systemd-logind.service
             │ └─787 /usr/lib/systemd/systemd-logind
             ├─systemd-machined.service
             │ └─216222 /usr/lib/systemd/systemd-machined
             ├─systemd-udevd.service
             │ └─udev
             │   └─730 /usr/lib/systemd/systemd-udevd
             ├─tuned.service
             │ └─113067 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             ├─virtlogd.service
             │ └─215589 /usr/sbin/virtlogd
             ├─virtnodedevd.service
             │ └─255222 /usr/sbin/virtnodedevd --timeout 120
             ├─virtqemud.service
             │ └─254597 /usr/sbin/virtqemud --timeout 120
             └─virtsecretd.service
               └─262209 /usr/sbin/virtsecretd --timeout 120

Dec 02 11:39:05 compute-0 nova_compute[254900]: 2025-12-02 11:39:05.316 254904 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Dec 02 11:39:08 compute-0 sshd-session[307174]: Invalid user test from 177.157.199.115 port 51670
Dec 02 11:39:08 compute-0 sshd-session[307174]: Received disconnect from 177.157.199.115 port 51670:11: Bye Bye [preauth]
Dec 02 11:39:08 compute-0 sshd-session[307174]: Disconnected from invalid user test 177.157.199.115 port 51670 [preauth]
Dec 02 11:39:09 compute-0 nova_compute[254900]: 2025-12-02 11:39:09.166 254904 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Dec 02 11:39:09 compute-0 podman[307436]: 2025-12-02 11:39:09.99259343 +0000 UTC m=+0.061665094 container health_status 301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.build-date=20251125, 
org.label-schema.vendor=CentOS, tcib_build_tag=fa2bb8efef6782c26ea7f1675eeb36dd, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_id=ovn_metadata_agent, org.label-schema.license=GPLv2)
Dec 02 11:39:10 compute-0 podman[307437]: 2025-12-02 11:39:10.054366307 +0000 UTC m=+0.123551454 container health_status 5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, container_name=ovn_controller, org.label-schema.build-date=20251125, managed_by=edpm_ansible, org.label-schema.license=GPLv2, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=fa2bb8efef6782c26ea7f1675eeb36dd)
Dec 02 11:39:10 compute-0 nova_compute[254900]: 2025-12-02 11:39:10.364 254904 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Dec 02 11:39:13 compute-0 virtqemud[254597]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory
Dec 02 11:39:14 compute-0 nova_compute[254900]: 2025-12-02 11:39:14.169 254904 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 25 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m

● user-1000.slice - User Slice of UID 1000
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Tue 2025-12-02 10:04:12 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:04:12 UTC; 1h 35min ago
       Docs: man:user@.service(5)
         IO: 734.1M read, 7.6G written
      Tasks: 21 (limit: 20059)
     Memory: 1.4G (peak: 4.0G)
        CPU: 23min 14.105s
     CGroup: /user.slice/user-1000.slice
             ├─session-1.scope
             │ └─4520 /usr/bin/python3
             ├─session-52.scope
             │ ├─301119 "sshd-session: zuul [priv]"
             │ ├─301122 "sshd-session: zuul@notty"
             │ ├─301123 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ ├─301147 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ ├─307235 timeout 15s turbostat --debug sleep 10
             │ ├─307887 timeout 300s systemctl status --all
             │ ├─307888 systemctl status --all
             │ ├─307978 timeout --foreground 300s virsh -r nodedev-dumpxml pci_0000_00_01_3
             │ └─307979 virsh -r nodedev-dumpxml pci_0000_00_01_3
             └─user@1000.service
               ├─app.slice
               │ └─dbus-broker.service
               │   ├─14561 /usr/bin/dbus-broker-launch --scope user
               │   └─14577 dbus-broker --log 4 --controller 9 --machine-id 1f988c78c563e12389ab342aced42dbb --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               ├─init.scope
               │ ├─4310 /usr/lib/systemd/systemd --user
               │ └─4312 "(sd-pam)"
               └─user.slice
                 └─podman-pause-2aa5b999.scope
                   └─14491 catatonit -P

Dec 02 11:09:27 compute-0 podman[255088]: 2025-12-02 11:09:27.071792921 +0000 UTC m=+0.124680178 container start 473189829e7117a1d1eab4a762a38a4795dfa3e1dd1d4d965c36c0097a74ceba (image=quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified, name=nova_compute_init, tcib_build_tag=fa2bb8efef6782c26ea7f1675eeb36dd, tcib_managed=true, config_id=edpm, io.buildah.version=1.41.3, org.label-schema.build-date=20251125, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified', 'privileged': False, 'user': 'root', 'restart': 'never', 'command': 'bash -c $* -- eval python3 /sbin/nova_statedir_ownership.py | logger -t nova_compute_init', 'net': 'none', 'security_opt': ['label=disable'], 'detach': False, 'environment': {'NOVA_STATEDIR_OWNERSHIP_SKIP': '/var/lib/nova/compute_id', '__OS_DEBUG': False}, 'volumes': ['/dev/log:/dev/log', '/var/lib/nova:/var/lib/nova:shared', '/var/lib/_nova_secontext:/var/lib/_nova_secontext:shared,z', '/var/lib/openstack/config/nova/nova_statedir_ownership.py:/sbin/nova_statedir_ownership.py:z']}, container_name=nova_compute_init, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.schema-version=1.0)
Dec 02 11:09:27 compute-0 python3.9[255063]: ansible-containers.podman.podman_container PODMAN-CONTAINER-DEBUG: podman start nova_compute_init
Dec 02 11:09:27 compute-0 sudo[255061]: pam_unix(sudo:session): session closed for user root
Dec 02 11:09:27 compute-0 sshd-session[223994]: Connection closed by 192.168.122.30 port 46604
Dec 02 11:09:27 compute-0 sshd-session[223991]: pam_unix(sshd:session): session closed for user zuul
Dec 02 11:38:22 compute-0 sudo[301123]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Dec 02 11:38:22 compute-0 sudo[301123]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Dec 02 11:38:33 compute-0 ovs-vsctl[301477]: ovs|00001|db_ctl_base|ERR|no key "dpdk-init" in Open_vSwitch record "." column other_config
Dec 02 11:39:04 compute-0 ovs-appctl[306389]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Dec 02 11:39:04 compute-0 ovs-appctl[306399]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

● user-42477.slice - User Slice of UID 42477
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Tue 2025-12-02 10:49:32 UTC; 49min ago
      Until: Tue 2025-12-02 10:49:32 UTC; 49min ago
       Docs: man:user@.service(5)
         IO: 1.0M read, 154.8M written
      Tasks: 26 (limit: 20059)
     Memory: 26.2M (peak: 76.5M)
        CPU: 4min 49.329s
     CGroup: /user.slice/user-42477.slice
             ├─session-21.scope
             │ ├─76630 "sshd-session: ceph-admin [priv]"
             │ └─76652 "sshd-session: ceph-admin"
             ├─session-23.scope
             │ ├─76647 "sshd-session: ceph-admin [priv]"
             │ └─76653 "sshd-session: ceph-admin@notty"
             ├─session-24.scope
             │ ├─76704 "sshd-session: ceph-admin [priv]"
             │ └─76707 "sshd-session: ceph-admin@notty"
             ├─session-25.scope
             │ ├─76758 "sshd-session: ceph-admin [priv]"
             │ └─76761 "sshd-session: ceph-admin@notty"
             ├─session-26.scope
             │ ├─76812 "sshd-session: ceph-admin [priv]"
             │ └─76816 "sshd-session: ceph-admin@notty"
             ├─session-27.scope
             │ ├─76867 "sshd-session: ceph-admin [priv]"
             │ └─76870 "sshd-session: ceph-admin@notty"
             ├─session-28.scope
             │ ├─76921 "sshd-session: ceph-admin [priv]"
             │ └─76924 "sshd-session: ceph-admin@notty"
             ├─session-29.scope
             │ ├─76975 "sshd-session: ceph-admin [priv]"
             │ └─76978 "sshd-session: ceph-admin@notty"
             ├─session-30.scope
             │ ├─77029 "sshd-session: ceph-admin [priv]"
             │ └─77032 "sshd-session: ceph-admin@notty"
             ├─session-31.scope
             │ ├─77083 "sshd-session: ceph-admin [priv]"
             │ └─77086 "sshd-session: ceph-admin@notty"
             ├─session-32.scope
             │ ├─77110 "sshd-session: ceph-admin [priv]"
             │ └─77113 "sshd-session: ceph-admin@notty"
             ├─session-33.scope
             │ ├─77164 "sshd-session: ceph-admin [priv]"
             │ └─77167 "sshd-session: ceph-admin@notty"
             └─user@42477.service
               └─init.scope
                 ├─76634 /usr/lib/systemd/systemd --user
                 └─76636 "(sd-pam)"

Dec 02 11:38:16 compute-0 podman[301005]: 2025-12-02 11:38:16.985904003 +0000 UTC m=+0.198588557 container attach c00ce674ac8e3776d6d6b71f1a07e4148b4ecc2421b1cc3c193f03c4617a4dc2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_solomon, ceph=True, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=reef, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default)
Dec 02 11:38:18 compute-0 podman[301005]: 2025-12-02 11:38:18.160765791 +0000 UTC m=+1.373450435 container died c00ce674ac8e3776d6d6b71f1a07e4148b4ecc2421b1cc3c193f03c4617a4dc2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_solomon, ceph=True, org.label-schema.license=GPLv2, CEPH_REF=reef, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Dec 02 11:38:18 compute-0 podman[301005]: 2025-12-02 11:38:18.236208736 +0000 UTC m=+1.448893290 container remove c00ce674ac8e3776d6d6b71f1a07e4148b4ecc2421b1cc3c193f03c4617a4dc2 (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=quizzical_solomon, io.buildah.version=1.39.3, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image)
Dec 02 11:38:18 compute-0 sudo[300896]: pam_unix(sudo:session): session closed for user root
Dec 02 11:38:18 compute-0 sudo[301069]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Dec 02 11:38:18 compute-0 sudo[301069]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 02 11:38:18 compute-0 sudo[301069]: pam_unix(sudo:session): session closed for user root
Dec 02 11:38:18 compute-0 sudo[301094]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Dec 02 11:38:18 compute-0 sudo[301094]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 02 11:38:18 compute-0 sudo[301094]: pam_unix(sudo:session): session closed for user root

● user.slice - User and Session Slice
     Loaded: loaded (/usr/lib/systemd/system/user.slice; static)
     Active: active since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
       Docs: man:systemd.special(7)
         IO: 735.1M read, 7.8G written
      Tasks: 48
     Memory: 1.4G (peak: 4.0G)
        CPU: 28min 4.100s
     CGroup: /user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4520 /usr/bin/python3
             │ ├─session-52.scope
             │ │ ├─301119 "sshd-session: zuul [priv]"
             │ │ ├─301122 "sshd-session: zuul@notty"
             │ │ ├─301123 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─301147 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─307235 timeout 15s turbostat --debug sleep 10
             │ │ ├─307887 timeout 300s systemctl status --all
             │ │ ├─307888 systemctl status --all
             │ │ ├─307978 timeout --foreground 300s virsh -r nodedev-dumpxml pci_0000_00_01_3
             │ │ └─307979 "[virsh]"
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─14561 /usr/bin/dbus-broker-launch --scope user
             │   │   └─14577 dbus-broker --log 4 --controller 9 --machine-id 1f988c78c563e12389ab342aced42dbb --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4310 /usr/lib/systemd/systemd --user
             │   │ └─4312 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-2aa5b999.scope
             │       └─14491 catatonit -P
             └─user-42477.slice
               ├─session-21.scope
               │ ├─76630 "sshd-session: ceph-admin [priv]"
               │ └─76652 "sshd-session: ceph-admin"
               ├─session-23.scope
               │ ├─76647 "sshd-session: ceph-admin [priv]"
               │ └─76653 "sshd-session: ceph-admin@notty"
               ├─session-24.scope
               │ ├─76704 "sshd-session: ceph-admin [priv]"
               │ └─76707 "sshd-session: ceph-admin@notty"
               ├─session-25.scope
               │ ├─76758 "sshd-session: ceph-admin [priv]"
               │ └─76761 "sshd-session: ceph-admin@notty"
               ├─session-26.scope
               │ ├─76812 "sshd-session: ceph-admin [priv]"
               │ └─76816 "sshd-session: ceph-admin@notty"
               ├─session-27.scope
               │ ├─76867 "sshd-session: ceph-admin [priv]"
               │ └─76870 "sshd-session: ceph-admin@notty"
               ├─session-28.scope
               │ ├─76921 "sshd-session: ceph-admin [priv]"
               │ └─76924 "sshd-session: ceph-admin@notty"
               ├─session-29.scope
               │ ├─76975 "sshd-session: ceph-admin [priv]"
               │ └─76978 "sshd-session: ceph-admin@notty"
               ├─session-30.scope
               │ ├─77029 "sshd-session: ceph-admin [priv]"
               │ └─77032 "sshd-session: ceph-admin@notty"
               ├─session-31.scope
               │ ├─77083 "sshd-session: ceph-admin [priv]"
               │ └─77086 "sshd-session: ceph-admin@notty"
               ├─session-32.scope
               │ ├─77110 "sshd-session: ceph-admin [priv]"
               │ └─77113 "sshd-session: ceph-admin@notty"
               ├─session-33.scope
               │ ├─77164 "sshd-session: ceph-admin [priv]"
               │ └─77167 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─76634 /usr/lib/systemd/systemd --user
                   └─76636 "(sd-pam)"

● dbus.socket - D-Bus System Message Bus Socket
     Loaded: loaded (/usr/lib/systemd/system/dbus.socket; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
   Triggers: ● dbus-broker.service
     Listen: /run/dbus/system_bus_socket (Stream)
     CGroup: /system.slice/dbus.socket

Dec 02 10:03:32 localhost systemd[1]: Listening on D-Bus System Message Bus Socket.

● dm-event.socket - Device-mapper event daemon FIFOs
     Loaded: loaded (/usr/lib/systemd/system/dm-event.socket; enabled; preset: enabled)
     Active: active (listening) since Tue 2025-12-02 10:41:14 UTC; 58min ago
      Until: Tue 2025-12-02 10:41:14 UTC; 58min ago
   Triggers: ● dm-event.service
       Docs: man:dmeventd(8)
     Listen: /run/dmeventd-server (FIFO)
             /run/dmeventd-client (FIFO)
     CGroup: /system.slice/dm-event.socket

Dec 02 10:41:14 compute-0 systemd[1]: Listening on Device-mapper event daemon FIFOs.

● iscsid.socket - Open-iSCSI iscsid Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsid.socket; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-02 11:06:35 UTC; 32min ago
      Until: Tue 2025-12-02 11:06:35 UTC; 32min ago
   Triggers: ● iscsid.service
       Docs: man:iscsid(8)
             man:iscsiadm(8)
     Listen: @ISCSIADM_ABSTRACT_NAMESPACE (Stream)
     CGroup: /system.slice/iscsid.socket

Dec 02 11:06:35 compute-0 systemd[1]: Listening on Open-iSCSI iscsid Socket.

○ iscsiuio.socket - Open-iSCSI iscsiuio Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● iscsiuio.service
       Docs: man:iscsiuio(8)
     Listen: @ISCSID_UIP_ABSTRACT_NAMESPACE (Stream)

○ libvirtd-admin.socket - libvirt legacy monolithic daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-admin.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)

○ libvirtd-ro.socket - libvirt legacy monolithic daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-ro.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)

○ libvirtd-tls.socket
     Loaded: masked (Reason: Unit libvirtd-tls.socket is masked.)
     Active: inactive (dead)

○ libvirtd.socket - libvirt legacy monolithic daemon socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock (Stream)

● lvm2-lvmpolld.socket - LVM2 poll daemon socket
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.socket; enabled; preset: enabled)
     Active: active (listening) since Tue 2025-12-02 10:41:15 UTC; 57min ago
      Until: Tue 2025-12-02 10:41:15 UTC; 57min ago
   Triggers: ● lvm2-lvmpolld.service
       Docs: man:lvmpolld(8)
     Listen: /run/lvm/lvmpolld.socket (Stream)
     CGroup: /system.slice/lvm2-lvmpolld.socket

Dec 02 10:41:15 compute-0 systemd[1]: Listening on LVM2 poll daemon socket.

● rpcbind.socket - RPCbind Server Activation Socket
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.socket; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
   Triggers: ● rpcbind.service
     Listen: /run/rpcbind.sock (Stream)
             0.0.0.0:111 (Stream)
             0.0.0.0:111 (Datagram)
             [::]:111 (Stream)
             [::]:111 (Datagram)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 16.0K (peak: 288.0K)
        CPU: 5ms
     CGroup: /system.slice/rpcbind.socket

● sssd-kcm.socket - SSSD Kerberos Cache Manager responder socket
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.socket; enabled; preset: enabled)
     Active: active (listening) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
   Triggers: ● sssd-kcm.service
       Docs: man:sssd-kcm(8)
     Listen: /run/.heim_org.h5l.kcm-socket (Stream)
     CGroup: /system.slice/sssd-kcm.socket

Dec 02 10:03:32 localhost systemd[1]: Listening on SSSD Kerberos Cache Manager responder socket.

○ syslog.socket - Syslog Socket
     Loaded: loaded (/usr/lib/systemd/system/syslog.socket; static)
     Active: inactive (dead)
   Triggers: ● syslog.service
       Docs: man:systemd.special(7)
             https://www.freedesktop.org/wiki/Software/systemd/syslog
     Listen: /run/systemd/journal/syslog (Datagram)

● systemd-coredump.socket - Process Core Dump Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-coredump.socket; static)
     Active: active (listening) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
       Docs: man:systemd-coredump(8)
     Listen: /run/systemd/coredump (SequentialPacket)
   Accepted: 1; Connected: 0;
     CGroup: /system.slice/systemd-coredump.socket

● systemd-initctl.socket - initctl Compatibility Named Pipe
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.socket; static)
     Active: active (listening) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
   Triggers: ● systemd-initctl.service
       Docs: man:systemd-initctl.socket(8)
     Listen: /run/initctl (FIFO)
     CGroup: /system.slice/systemd-initctl.socket

● systemd-journald-dev-log.socket - Journal Socket (/dev/log)
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald-dev-log.socket; static)
     Active: active (running) since Tue 2025-12-02 10:03:29 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:29 UTC; 1h 35min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/dev-log (Datagram)
     CGroup: /system.slice/systemd-journald-dev-log.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-journald.socket - Journal Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.socket; static)
     Active: active (running) since Tue 2025-12-02 10:03:29 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:29 UTC; 1h 35min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/socket (Datagram)
             /run/systemd/journal/stdout (Stream)
     CGroup: /system.slice/systemd-journald.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-rfkill.socket - Load/Save RF Kill Switch Status /dev/rfkill Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.socket; static)
     Active: active (listening) since Tue 2025-12-02 10:03:34 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:34 UTC; 1h 35min ago
   Triggers: ● systemd-rfkill.service
       Docs: man:systemd-rfkill.socket(8)
     Listen: /dev/rfkill (Special)
     CGroup: /system.slice/systemd-rfkill.socket

Dec 02 10:03:34 np0005542249.novalocal systemd[1]: Listening on Load/Save RF Kill Switch Status /dev/rfkill Watch.

● systemd-udevd-control.socket - udev Control Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-control.socket; static)
     Active: active (running) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-control.socket(8)
             man:udev(7)
     Listen: /run/udev/control (SequentialPacket)
     CGroup: /system.slice/systemd-udevd-control.socket

● systemd-udevd-kernel.socket - udev Kernel Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-kernel.socket; static)
     Active: active (running) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-kernel.socket(8)
             man:udev(7)
     Listen: kobject-uevent 1 (Netlink)
     CGroup: /system.slice/systemd-udevd-kernel.socket

○ virtinterfaced-admin.socket - libvirt interface daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-admin-sock (Stream)

○ virtinterfaced-ro.socket - libvirt interface daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock-ro (Stream)

○ virtinterfaced.socket - libvirt interface daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock (Stream)

○ virtlockd-admin.socket - libvirt locking daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-admin-sock (Stream)

● virtlockd.socket - libvirt locking daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.socket; enabled; preset: enabled)
     Active: active (listening) since Tue 2025-12-02 11:05:26 UTC; 33min ago
      Until: Tue 2025-12-02 11:05:26 UTC; 33min ago
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-sock (Stream)
     CGroup: /system.slice/virtlockd.socket

Dec 02 11:05:26 compute-0 systemd[1]: Listening on libvirt locking daemon socket.

● virtlogd-admin.socket - libvirt logging daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd-admin.socket.d
             └─override.conf
     Active: active (running) since Tue 2025-12-02 11:05:21 UTC; 33min ago
      Until: Tue 2025-12-02 11:05:21 UTC; 33min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 0B (peak: 536.0K)
        CPU: 4ms
     CGroup: /system.slice/virtlogd-admin.socket

Dec 02 11:05:21 compute-0 systemd[1]: Starting libvirt logging daemon admin socket...
Dec 02 11:05:21 compute-0 systemd[1]: Listening on libvirt logging daemon admin socket.

● virtlogd.socket - libvirt logging daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd.socket.d
             └─override.conf
     Active: active (running) since Tue 2025-12-02 11:05:21 UTC; 33min ago
      Until: Tue 2025-12-02 11:05:21 UTC; 33min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 4.0K (peak: 512.0K)
        CPU: 6ms
     CGroup: /system.slice/virtlogd.socket

Dec 02 11:05:21 compute-0 systemd[1]: Starting libvirt logging daemon socket...
Dec 02 11:05:21 compute-0 systemd[1]: Listening on libvirt logging daemon socket.

○ virtnetworkd-admin.socket - libvirt network daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-admin-sock (Stream)

○ virtnetworkd-ro.socket - libvirt network daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock-ro (Stream)

○ virtnetworkd.socket - libvirt network daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock (Stream)

● virtnodedevd-admin.socket - libvirt nodedev daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-admin.socket.d
             └─override.conf
     Active: active (running) since Tue 2025-12-02 11:05:23 UTC; 33min ago
      Until: Tue 2025-12-02 11:05:23 UTC; 33min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 0B (peak: 512.0K)
        CPU: 4ms
     CGroup: /system.slice/virtnodedevd-admin.socket

Dec 02 11:05:23 compute-0 systemd[1]: Starting libvirt nodedev daemon admin socket...
Dec 02 11:05:23 compute-0 systemd[1]: Listening on libvirt nodedev daemon admin socket.

● virtnodedevd-ro.socket - libvirt nodedev daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-ro.socket.d
             └─override.conf
     Active: active (running) since Tue 2025-12-02 11:05:23 UTC; 33min ago
      Until: Tue 2025-12-02 11:05:23 UTC; 33min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 0B (peak: 512.0K)
        CPU: 5ms
     CGroup: /system.slice/virtnodedevd-ro.socket

Dec 02 11:05:23 compute-0 systemd[1]: Starting libvirt nodedev daemon read-only socket...
Dec 02 11:05:23 compute-0 systemd[1]: Listening on libvirt nodedev daemon read-only socket.

● virtnodedevd.socket - libvirt nodedev daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd.socket.d
             └─override.conf
     Active: active (running) since Tue 2025-12-02 11:05:23 UTC; 33min ago
      Until: Tue 2025-12-02 11:05:23 UTC; 33min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 4.0K (peak: 512.0K)
        CPU: 4ms
     CGroup: /system.slice/virtnodedevd.socket

Dec 02 11:05:23 compute-0 systemd[1]: Starting libvirt nodedev daemon socket...
Dec 02 11:05:23 compute-0 systemd[1]: Listening on libvirt nodedev daemon socket.

○ virtnwfilterd-admin.socket - libvirt nwfilter daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-admin-sock (Stream)

○ virtnwfilterd-ro.socket - libvirt nwfilter daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock-ro (Stream)

○ virtnwfilterd.socket - libvirt nwfilter daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock (Stream)

● virtproxyd-admin.socket - libvirt proxy daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-admin.socket.d
             └─override.conf
     Active: active (listening) since Tue 2025-12-02 11:05:24 UTC; 33min ago
      Until: Tue 2025-12-02 11:05:24 UTC; 33min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 4.0K (peak: 512.0K)
        CPU: 5ms
     CGroup: /system.slice/virtproxyd-admin.socket

Dec 02 11:05:24 compute-0 systemd[1]: Starting libvirt proxy daemon admin socket...
Dec 02 11:05:24 compute-0 systemd[1]: Listening on libvirt proxy daemon admin socket.

● virtproxyd-ro.socket - libvirt proxy daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-ro.socket.d
             └─override.conf
     Active: active (listening) since Tue 2025-12-02 11:05:24 UTC; 33min ago
      Until: Tue 2025-12-02 11:05:24 UTC; 33min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 4.0K (peak: 512.0K)
        CPU: 5ms
     CGroup: /system.slice/virtproxyd-ro.socket

Dec 02 11:05:24 compute-0 systemd[1]: Starting libvirt proxy daemon read-only socket...
Dec 02 11:05:24 compute-0 systemd[1]: Listening on libvirt proxy daemon read-only socket.

● virtproxyd-tls.socket - libvirt proxy daemon TLS IP socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-tls.socket; enabled; preset: disabled)
     Active: active (listening) since Tue 2025-12-02 11:04:11 UTC; 35min ago
      Until: Tue 2025-12-02 11:04:11 UTC; 35min ago
   Triggers: ● virtproxyd.service
     Listen: [::]:16514 (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 8.0K (peak: 256.0K)
        CPU: 3ms
     CGroup: /system.slice/virtproxyd-tls.socket

Dec 02 11:04:11 compute-0 systemd[1]: Listening on libvirt proxy daemon TLS IP socket.

● virtproxyd.socket - libvirt proxy daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd.socket.d
             └─override.conf
     Active: active (listening) since Tue 2025-12-02 11:04:11 UTC; 35min ago
      Until: Tue 2025-12-02 11:04:11 UTC; 35min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock (Stream)
     CGroup: /system.slice/virtproxyd.socket

Dec 02 11:04:11 compute-0 systemd[1]: Listening on libvirt proxy daemon socket.

● virtqemud-admin.socket - libvirt QEMU daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-admin.socket.d
             └─override.conf
     Active: active (running) since Tue 2025-12-02 11:05:26 UTC; 33min ago
      Until: Tue 2025-12-02 11:05:26 UTC; 33min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtqemud-admin.socket

Dec 02 11:05:26 compute-0 systemd[1]: Starting libvirt QEMU daemon admin socket...
Dec 02 11:05:26 compute-0 systemd[1]: Listening on libvirt QEMU daemon admin socket.

● virtqemud-ro.socket - libvirt QEMU daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-ro.socket.d
             └─override.conf
     Active: active (running) since Tue 2025-12-02 11:05:26 UTC; 33min ago
      Until: Tue 2025-12-02 11:05:26 UTC; 33min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 0B (peak: 708.0K)
        CPU: 3ms
     CGroup: /system.slice/virtqemud-ro.socket

Dec 02 11:05:26 compute-0 systemd[1]: Starting libvirt QEMU daemon read-only socket...
Dec 02 11:05:26 compute-0 systemd[1]: Listening on libvirt QEMU daemon read-only socket.

● virtqemud.socket - libvirt QEMU daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud.socket.d
             └─override.conf
     Active: active (running) since Tue 2025-12-02 11:05:26 UTC; 33min ago
      Until: Tue 2025-12-02 11:05:26 UTC; 33min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtqemud.socket

Dec 02 11:05:26 compute-0 systemd[1]: Starting libvirt QEMU daemon socket...
Dec 02 11:05:26 compute-0 systemd[1]: Listening on libvirt QEMU daemon socket.

● virtsecretd-admin.socket - libvirt secret daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-admin.socket.d
             └─override.conf
     Active: active (running) since Tue 2025-12-02 11:05:27 UTC; 33min ago
      Until: Tue 2025-12-02 11:05:27 UTC; 33min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 4.0K (peak: 636.0K)
        CPU: 4ms
     CGroup: /system.slice/virtsecretd-admin.socket

Dec 02 11:05:27 compute-0 systemd[1]: Starting libvirt secret daemon admin socket...
Dec 02 11:05:27 compute-0 systemd[1]: Listening on libvirt secret daemon admin socket.

● virtsecretd-ro.socket - libvirt secret daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-ro.socket.d
             └─override.conf
     Active: active (running) since Tue 2025-12-02 11:05:27 UTC; 33min ago
      Until: Tue 2025-12-02 11:05:27 UTC; 33min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 4.0K (peak: 512.0K)
        CPU: 7ms
     CGroup: /system.slice/virtsecretd-ro.socket

Dec 02 11:05:27 compute-0 systemd[1]: Starting libvirt secret daemon read-only socket...
Dec 02 11:05:27 compute-0 systemd[1]: Listening on libvirt secret daemon read-only socket.

● virtsecretd.socket - libvirt secret daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd.socket.d
             └─override.conf
     Active: active (running) since Tue 2025-12-02 11:05:27 UTC; 33min ago
      Until: Tue 2025-12-02 11:05:27 UTC; 33min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 0B (peak: 512.0K)
        CPU: 5ms
     CGroup: /system.slice/virtsecretd.socket

Dec 02 11:05:27 compute-0 systemd[1]: Starting libvirt secret daemon socket...
Dec 02 11:05:27 compute-0 systemd[1]: Listening on libvirt secret daemon socket.

○ virtstoraged-admin.socket - libvirt storage daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-admin-sock (Stream)

○ virtstoraged-ro.socket - libvirt storage daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock-ro (Stream)

○ virtstoraged.socket - libvirt storage daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock (Stream)

● swap.swap - /swap
     Loaded: loaded (/etc/fstab; generated)
     Active: active since Tue 2025-12-02 10:43:21 UTC; 55min ago
      Until: Tue 2025-12-02 10:43:21 UTC; 55min ago
       What: /swap
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● basic.target - Basic System
     Loaded: loaded (/usr/lib/systemd/system/basic.target; static)
     Active: active since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Dec 02 10:03:32 localhost systemd[1]: Reached target Basic System.

○ blockdev@dev-disk-by\x2duuid-b277050f\x2d8ace\x2d464d\x2dabb6\x2d4c46d4c45253.target - Block Device Preparation for /dev/disk/by-uuid/b277050f-8ace-464d-abb6-4c46d4c45253
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ blockdev@dev-vda1.target - Block Device Preparation for /dev/vda1
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d.target - Ceph cluster 95bc4eaa-1a14-59bf-acf2-4b3da055547d
     Loaded: loaded (/etc/systemd/system/ceph-95bc4eaa-1a14-59bf-acf2-4b3da055547d.target; enabled; preset: disabled)
     Active: active since Tue 2025-12-02 10:48:34 UTC; 50min ago
      Until: Tue 2025-12-02 10:48:34 UTC; 50min ago

Dec 02 10:48:34 compute-0 systemd[1]: Reached target Ceph cluster 95bc4eaa-1a14-59bf-acf2-4b3da055547d.

● ceph.target - All Ceph clusters and services
     Loaded: loaded (/etc/systemd/system/ceph.target; enabled; preset: disabled)
     Active: active since Tue 2025-12-02 10:48:34 UTC; 50min ago
      Until: Tue 2025-12-02 10:48:34 UTC; 50min ago

Dec 02 10:48:34 compute-0 systemd[1]: Reached target All Ceph clusters and services.

● cloud-config.target - Cloud-config availability
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.target; static)
     Active: active since Tue 2025-12-02 10:03:36 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:36 UTC; 1h 35min ago

Dec 02 10:03:36 np0005542249.novalocal systemd[1]: Reached target Cloud-config availability.

● cloud-init.target - Cloud-init target
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.target; enabled-runtime; preset: disabled)
     Active: active since Tue 2025-12-02 10:03:37 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:37 UTC; 1h 35min ago

Dec 02 10:03:37 np0005542249.novalocal systemd[1]: Reached target Cloud-init target.

○ cryptsetup-pre.target - Local Encrypted Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● cryptsetup.target - Local Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup.target; static)
     Active: active since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

● edpm_libvirt.target
     Loaded: loaded (/etc/systemd/system/edpm_libvirt.target; static)
     Active: active since Tue 2025-12-02 11:06:02 UTC; 33min ago
      Until: Tue 2025-12-02 11:06:02 UTC; 33min ago

Dec 02 11:06:02 compute-0 systemd[1]: Reached target edpm_libvirt.target.

○ emergency.target - Emergency Mode
     Loaded: loaded (/usr/lib/systemd/system/emergency.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ first-boot-complete.target - First Boot Complete
     Loaded: loaded (/usr/lib/systemd/system/first-boot-complete.target; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Dec 02 10:03:32 localhost systemd[1]: First Boot Complete was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ getty-pre.target - Preparation for Logins
     Loaded: loaded (/usr/lib/systemd/system/getty-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

● getty.target - Login Prompts
     Loaded: loaded (/usr/lib/systemd/system/getty.target; static)
     Active: active since Tue 2025-12-02 10:03:36 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:36 UTC; 1h 35min ago
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

Dec 02 10:03:36 np0005542249.novalocal systemd[1]: Reached target Login Prompts.

○ graphical.target - Graphical Interface
     Loaded: loaded (/usr/lib/systemd/system/graphical.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ initrd-fs.target - Initrd File Systems
     Loaded: loaded (/usr/lib/systemd/system/initrd-fs.target; static)
     Active: inactive (dead) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Dec 02 10:03:31 localhost systemd[1]: Reached target Initrd File Systems.

○ initrd-root-device.target - Initrd Root Device
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-device.target; static)
     Active: inactive (dead) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Dec 02 10:03:30 localhost systemd[1]: Reached target Initrd Root Device.
Dec 02 10:03:31 localhost systemd[1]: Stopped target Initrd Root Device.

○ initrd-root-fs.target - Initrd Root File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-fs.target; static)
     Active: inactive (dead) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Dec 02 10:03:31 localhost systemd[1]: Reached target Initrd Root File System.

○ initrd-switch-root.target - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.target; static)
     Active: inactive (dead) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago

Dec 02 10:03:31 localhost systemd[1]: Reached target Switch Root.

○ initrd-usr-fs.target - Initrd /usr File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-usr-fs.target; static)
     Active: inactive (dead) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Dec 02 10:03:31 localhost systemd[1]: Stopped target Initrd /usr File System.
Notice: journal has been rotated since unit was started, output may be incomplete.

○ initrd.target - Initrd Default Target
     Loaded: loaded (/usr/lib/systemd/system/initrd.target; static)
     Active: inactive (dead) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Dec 02 10:03:31 localhost systemd[1]: Reached target Initrd Default Target.
Dec 02 10:03:31 localhost systemd[1]: Stopped target Initrd Default Target.

● integritysetup.target - Local Integrity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/integritysetup.target; static)
     Active: active since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

● local-fs-pre.target - Preparation for Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs-pre.target; static)
     Active: active since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Dec 02 10:03:32 localhost systemd[1]: Reached target Preparation for Local File Systems.

● local-fs.target - Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs.target; static)
     Active: active since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Dec 02 10:03:32 localhost systemd[1]: Reached target Local File Systems.

● multi-user.target - Multi-User System
     Loaded: loaded (/usr/lib/systemd/system/multi-user.target; indirect; preset: disabled)
     Active: active since Tue 2025-12-02 10:03:36 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:36 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Dec 02 10:03:36 np0005542249.novalocal systemd[1]: Reached target Multi-User System.

● network-online.target - Network is Online
     Loaded: loaded (/usr/lib/systemd/system/network-online.target; static)
     Active: active since Tue 2025-12-02 10:03:36 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:36 UTC; 1h 35min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Dec 02 10:03:36 np0005542249.novalocal systemd[1]: Reached target Network is Online.

● network-pre.target - Preparation for Network
     Loaded: loaded (/usr/lib/systemd/system/network-pre.target; static)
     Active: active since Tue 2025-12-02 10:03:34 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:34 UTC; 1h 35min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Dec 02 10:03:34 np0005542249.novalocal systemd[1]: Reached target Preparation for Network.

● network.target - Network
     Loaded: loaded (/usr/lib/systemd/system/network.target; static)
     Active: active since Tue 2025-12-02 10:03:34 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:34 UTC; 1h 35min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Dec 02 10:03:34 np0005542249.novalocal systemd[1]: Reached target Network.

● nfs-client.target - NFS client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-client.target; enabled; preset: disabled)
     Active: active since Tue 2025-12-02 10:03:34 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:34 UTC; 1h 35min ago

Dec 02 10:03:34 np0005542249.novalocal systemd[1]: Reached target NFS client services.

○ nss-lookup.target - Host and Network Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-lookup.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● nss-user-lookup.target - User and Group Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-user-lookup.target; static)
     Active: active since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Dec 02 10:03:32 localhost systemd[1]: Reached target User and Group Name Lookups.

● paths.target - Path Units
     Loaded: loaded (/usr/lib/systemd/system/paths.target; static)
     Active: active since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

○ remote-cryptsetup.target - Remote Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-cryptsetup.target; disabled; preset: enabled)
     Active: inactive (dead) since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Dec 02 10:03:30 localhost systemd[1]: Reached target Remote Encrypted Volumes.
Dec 02 10:03:31 localhost systemd[1]: Stopped target Remote Encrypted Volumes.

● remote-fs-pre.target - Preparation for Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs-pre.target; static)
     Active: active since Tue 2025-12-02 10:03:34 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:34 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Dec 02 10:03:34 np0005542249.novalocal systemd[1]: Reached target Preparation for Remote File Systems.

● remote-fs.target - Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs.target; enabled; preset: enabled)
     Active: active since Tue 2025-12-02 10:03:34 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:34 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Dec 02 10:03:34 np0005542249.novalocal systemd[1]: Reached target Remote File Systems.

○ remote-veritysetup.target - Remote Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-veritysetup.target; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ rescue.target - Rescue Mode
     Loaded: loaded (/usr/lib/systemd/system/rescue.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● rpc_pipefs.target
     Loaded: loaded (/usr/lib/systemd/system/rpc_pipefs.target; static)
     Active: active since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago

● rpcbind.target - RPC Port Mapper
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.target; static)
     Active: active since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

○ shutdown.target - System Shutdown
     Loaded: loaded (/usr/lib/systemd/system/shutdown.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● slices.target - Slice Units
     Loaded: loaded (/usr/lib/systemd/system/slices.target; static)
     Active: active since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
        Docs: man:systemd.special(7)
Unit syslog.target could not be found.

● sockets.target - Socket Units
     Loaded: loaded (/usr/lib/systemd/system/sockets.target; static)
     Active: active since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Dec 02 10:03:32 localhost systemd[1]: Reached target Socket Units.

● sshd-keygen.target
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen.target; static)
     Active: active since Tue 2025-12-02 11:03:52 UTC; 35min ago
      Until: Tue 2025-12-02 11:03:52 UTC; 35min ago

Dec 02 11:03:52 compute-0 systemd[1]: Reached target sshd-keygen.target.

● swap.target - Swaps
     Loaded: loaded (/usr/lib/systemd/system/swap.target; static)
     Active: active since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

● sysinit.target - System Initialization
     Loaded: loaded (/usr/lib/systemd/system/sysinit.target; static)
     Active: active since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Dec 02 10:03:32 localhost systemd[1]: Reached target System Initialization.

● time-set.target - System Time Set
     Loaded: loaded (/usr/lib/systemd/system/time-set.target; static)
     Active: active since Tue 2025-12-02 10:48:35 UTC; 50min ago
      Until: Tue 2025-12-02 10:48:35 UTC; 50min ago
       Docs: man:systemd.special(7)

Dec 02 10:48:35 compute-0 systemd[1]: Reached target System Time Set.

● time-sync.target - System Time Synchronized
     Loaded: loaded (/usr/lib/systemd/system/time-sync.target; static)
     Active: active since Tue 2025-12-02 10:48:35 UTC; 50min ago
      Until: Tue 2025-12-02 10:48:35 UTC; 50min ago
       Docs: man:systemd.special(7)

Dec 02 10:48:35 compute-0 systemd[1]: Reached target System Time Synchronized.

● timers.target - Timer Units
     Loaded: loaded (/usr/lib/systemd/system/timers.target; static)
     Active: active since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Dec 02 10:03:32 localhost systemd[1]: Reached target Timer Units.

○ umount.target - Unmount All Filesystems
     Loaded: loaded (/usr/lib/systemd/system/umount.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ veritysetup-pre.target - Local Verity Protected Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/veritysetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● veritysetup.target - Local Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/veritysetup.target; static)
     Active: active since Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:31 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

○ virt-guest-shutdown.target - Libvirt guests shutdown
     Loaded: loaded (/etc/systemd/system/virt-guest-shutdown.target; static)
     Active: inactive (dead)
       Docs: https://libvirt.org

● 130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193-746b1df9bc05c1b6.timer - /usr/bin/podman healthcheck run 130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193
     Loaded: loaded (/run/systemd/transient/130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193-746b1df9bc05c1b6.timer; transient)
  Transient: yes
     Active: active (waiting) since Tue 2025-12-02 11:07:36 UTC; 31min ago
      Until: Tue 2025-12-02 11:07:36 UTC; 31min ago
    Trigger: Tue 2025-12-02 11:39:31 UTC; 15s left
   Triggers: ● 130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193-746b1df9bc05c1b6.service

Dec 02 11:07:36 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 130400eaf961ceaaa203e2cc0a5af0fe03396f0667ec510c1291b1ee03bff193.

● 301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193-29801a8faae2132d.timer - /usr/bin/podman healthcheck run 301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193
     Loaded: loaded (/run/systemd/transient/301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193-29801a8faae2132d.timer; transient)
  Transient: yes
     Active: active (waiting) since Tue 2025-12-02 11:01:17 UTC; 37min ago
      Until: Tue 2025-12-02 11:01:17 UTC; 37min ago
    Trigger: Tue 2025-12-02 11:39:40 UTC; 24s left
   Triggers: ● 301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193-29801a8faae2132d.service

Dec 02 11:01:17 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 301660b5961629ac564857138dcba46d0947a2a1c7d3debbb9f5976c1df04193.

● 5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998-3c4e81cff09bf014.timer - /usr/bin/podman healthcheck run 5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998
     Loaded: loaded (/run/systemd/transient/5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998-3c4e81cff09bf014.timer; transient)
  Transient: yes
     Active: active (waiting) since Tue 2025-12-02 11:00:09 UTC; 39min ago
      Until: Tue 2025-12-02 11:00:09 UTC; 39min ago
    Trigger: Tue 2025-12-02 11:39:40 UTC; 24s left
   Triggers: ● 5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998-3c4e81cff09bf014.service

Dec 02 11:00:09 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 5c31229430d6adbfb6e358463fdefc47061e3db1d274ed40ca82e25890f29998.

● dnf-makecache.timer - dnf makecache --timer
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.timer; enabled; preset: enabled)
     Active: active (waiting) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
    Trigger: Tue 2025-12-02 12:03:56 UTC; 24min left
   Triggers: ● dnf-makecache.service

Dec 02 10:03:32 localhost systemd[1]: Started dnf makecache --timer.

● logrotate.timer - Daily rotation of log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.timer; enabled; preset: enabled)
     Active: active (waiting) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
    Trigger: Wed 2025-12-03 00:00:00 UTC; 12h left
   Triggers: ● logrotate.service
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

Dec 02 10:03:32 localhost systemd[1]: Started Daily rotation of log files.

○ raid-check.timer - Weekly RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.timer; enabled; preset: enabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● raid-check.service

○ sysstat-collect.timer - Run system activity accounting tool every 10 minutes
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-collect.service

○ sysstat-summary.timer - Generate summary of yesterday's process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-summary.service

● systemd-tmpfiles-clean.timer - Daily Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.timer; static)
     Active: active (waiting) since Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
      Until: Tue 2025-12-02 10:03:32 UTC; 1h 35min ago
    Trigger: Wed 2025-12-03 10:18:41 UTC; 22h left
   Triggers: ● systemd-tmpfiles-clean.service
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)

Dec 02 10:03:32 localhost systemd[1]: Started Daily Cleanup of Temporary Directories.

● unbound-anchor.timer - daily update of the root trust anchor for DNSSEC
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.timer; enabled; preset: enabled)
     Active: active (waiting) since Tue 2025-12-02 10:43:53 UTC; 55min ago
      Until: Tue 2025-12-02 10:43:53 UTC; 55min ago
    Trigger: Wed 2025-12-03 00:00:00 UTC; 12h left
   Triggers: ● unbound-anchor.service
       Docs: man:unbound-anchor(8)

Dec 02 10:43:53 compute-0 systemd[1]: Started daily update of the root trust anchor for DNSSEC.
