● compute-0
    State: running
    Units: 480 loaded (incl. loaded aliases)
     Jobs: 0 queued
   Failed: 0 units
    Since: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
  systemd: 252-57.el9
   CGroup: /
           ├─315709 turbostat --debug sleep 10
           ├─315712 sleep 10
           ├─init.scope
           │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
           ├─machine.slice
           │ ├─libpod-26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1.scope
           │ │ └─container
           │ │   ├─260091 dumb-init --single-child -- kolla_start
           │ │   ├─260093 /usr/bin/python3 /usr/bin/nova-compute
           │ │   ├─267479 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmpqv15d5ht/privsep.sock
           │ │   ├─268186 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmplcy3_gw7/privsep.sock
           │ │   └─268305 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmpb19gkq7f/privsep.sock
           │ ├─libpod-65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2.scope
           │ │ └─container
           │ │   ├─152669 dumb-init --single-child -- kolla_start
           │ │   └─152684 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
           │ ├─libpod-8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8.scope
           │ │ └─container
           │ │   ├─241114 dumb-init --single-child -- kolla_start
           │ │   └─241117 /usr/sbin/multipathd -d
           │ ├─libpod-dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab.scope
           │ │ └─container
           │ │   ├─162242 dumb-init --single-child -- kolla_start
           │ │   ├─162245 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │ │   ├─162583 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │ │   ├─162666 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpf884qsmb/privsep.sock
           │ │   ├─267859 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmph48b70lz/privsep.sock
           │ │   └─268023 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmp8g8ifqko/privsep.sock
           │ └─libpod-f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d.scope
           │   └─container
           │     ├─230879 dumb-init --single-child -- kolla_start
           │     └─230882 /usr/sbin/iscsid -f
           ├─system.slice
           │ ├─NetworkManager.service
           │ │ └─44969 /usr/sbin/NetworkManager --no-daemon
           │ ├─auditd.service
           │ │ ├─700 /sbin/auditd
           │ │ └─702 /usr/sbin/sedispatch
           │ ├─chronyd.service
           │ │ └─54376 /usr/sbin/chronyd -F 2
           │ ├─crond.service
           │ │ ├─ 1007 /usr/sbin/crond -n
           │ │ └─27476 /usr/sbin/anacron -s
           │ ├─dbus-broker.service
           │ │ ├─738 /usr/bin/dbus-broker-launch --scope system --audit
           │ │ └─770 dbus-broker --log 4 --controller 9 --machine-id a1727ec20198bc6caf436a6e13c4ff5e --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
           │ ├─edpm_iscsid.service
           │ │ └─230877 /usr/bin/conmon --api-version 1 -c f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d -u f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d/userdata -p /run/containers/storage/overlay-containers/f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d/userdata/pidfile -n iscsid --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d/userdata/oci-log --conmon-pidfile /run/iscsid.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only 
--exit-command-arg f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d
           │ ├─edpm_multipathd.service
           │ │ └─241112 /usr/bin/conmon --api-version 1 -c 8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8 -u 8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8/userdata -p /run/containers/storage/overlay-containers/8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8/userdata/pidfile -n multipathd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8/userdata/oci-log --conmon-pidfile /run/multipathd.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8
           │ ├─edpm_nova_compute.service
           │ │ └─260089 /usr/bin/conmon --api-version 1 -c 26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1 -u 26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1/userdata -p /run/containers/storage/overlay-containers/26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1
           │ ├─edpm_ovn_controller.service
           │ │ └─152667 /usr/bin/conmon --api-version 1 -c 65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2 -u 65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2/userdata -p /run/containers/storage/overlay-containers/65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2
           │ ├─edpm_ovn_metadata_agent.service
           │ │ └─162240 /usr/bin/conmon --api-version 1 -c dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab -u dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab/userdata -p /run/containers/storage/overlay-containers/dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab
           │ ├─gssproxy.service
           │ │ └─874 /usr/sbin/gssproxy -D
           │ ├─irqbalance.service
           │ │ └─777 /usr/sbin/irqbalance
           │ ├─ovs-vswitchd.service
           │ │ └─43265 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
           │ ├─ovsdb-server.service
           │ │ └─43180 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
           │ ├─polkit.service
           │ │ └─6205 /usr/lib/polkit-1/polkitd --no-debug
           │ ├─rpcbind.service
           │ │ └─698 /usr/bin/rpcbind -w -f
           │ ├─rsyslog.service
           │ │ └─1004 /usr/sbin/rsyslogd -n
           │ ├─sshd.service
           │ │ └─189518 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
           │ ├─system-ceph\x2d92cfe4d4\x2d4917\x2d5be1\x2d9d00\x2d73758793a62b.slice
           │ │ ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@crash.compute-0.service
           │ │ │ ├─libpod-payload-90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476
           │ │ │ │ ├─82392 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
           │ │ │ │ └─82394 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
           │ │ │ └─runtime
           │ │ │   └─82390 /usr/bin/conmon --api-version 1 -c 90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476 -u 90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476/userdata -p /run/containers/storage/overlay-containers/90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476
           │ │ ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mds.cephfs.compute-0.uxaxgb.service
           │ │ │ ├─libpod-payload-53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6
           │ │ │ │ ├─101046 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.uxaxgb -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─101067 /usr/bin/ceph-mds -n mds.cephfs.compute-0.uxaxgb -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─101044 /usr/bin/conmon --api-version 1 -c 53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6 -u 53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6/userdata -p /run/containers/storage/overlay-containers/53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-mds-cephfs-compute-0-uxaxgb --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mds.cephfs.compute-0.uxaxgb.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6
           │ │ ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mgr.compute-0.pdyrua.service
           │ │ │ ├─libpod-payload-5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba
           │ │ │ │ ├─74615 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.pdyrua -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─74617 /usr/bin/ceph-mgr -n mgr.compute-0.pdyrua -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─74613 /usr/bin/conmon --api-version 1 -c 5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba -u 5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba/userdata -p /run/containers/storage/overlay-containers/5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-mgr-compute-0-pdyrua --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mgr.compute-0.pdyrua.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba
           │ │ ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mon.compute-0.service
           │ │ │ ├─libpod-payload-a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05
           │ │ │ │ ├─74324 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ │ └─74326 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─74322 /usr/bin/conmon --api-version 1 -c a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05 -u a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05/userdata -p /run/containers/storage/overlay-containers/a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05
           │ │ ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.0.service
           │ │ │ ├─libpod-payload-47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205
           │ │ │ │ ├─88323 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─88325 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─88321 /usr/bin/conmon --api-version 1 -c 47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205 -u 47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205/userdata -p /run/containers/storage/overlay-containers/47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205
           │ │ ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.1.service
           │ │ │ ├─libpod-payload-159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2
           │ │ │ │ ├─89399 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─89401 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─89397 /usr/bin/conmon --api-version 1 -c 159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2 -u 159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2/userdata -p /run/containers/storage/overlay-containers/159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2
           │ │ ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.2.service
           │ │ │ ├─libpod-payload-1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784
           │ │ │ │ ├─90441 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─90443 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─90439 /usr/bin/conmon --api-version 1 -c 1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784 -u 1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784/userdata -p /run/containers/storage/overlay-containers/1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784
           │ │ └─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@rgw.rgw.compute-0.bqunnq.service
           │ │   ├─libpod-payload-25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60
           │ │   │ ├─100587 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.bqunnq -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │   │ └─100589 /usr/bin/radosgw -n client.rgw.rgw.compute-0.bqunnq -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │   └─runtime
           │ │     └─100585 /usr/bin/conmon --api-version 1 -c 25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60 -u 25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60/userdata -p /run/containers/storage/overlay-containers/25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-rgw-rgw-compute-0-bqunnq --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@rgw.rgw.compute-0.bqunnq.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60
           │ ├─system-getty.slice
           │ │ └─getty@tty1.service
           │ │   └─1008 /sbin/agetty -o "-p -- \\u" --noclear - linux
           │ ├─system-serial\x2dgetty.slice
           │ │ └─serial-getty@ttyS0.service
           │ │   └─1009 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
           │ ├─systemd-hostnamed.service
           │ │ └─313130 /usr/lib/systemd/systemd-hostnamed
           │ ├─systemd-journald.service
           │ │ └─674 /usr/lib/systemd/systemd-journald
           │ ├─systemd-logind.service
           │ │ └─782 /usr/lib/systemd/systemd-logind
           │ ├─systemd-machined.service
           │ │ └─215071 /usr/lib/systemd/systemd-machined
           │ ├─systemd-udevd.service
           │ │ └─udev
           │ │   └─729 /usr/lib/systemd/systemd-udevd
           │ ├─tuned.service
           │ │ └─112210 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
           │ ├─virtlogd.service
           │ │ └─214444 /usr/sbin/virtlogd
           │ ├─virtnodedevd.service
           │ │ └─260415 /usr/sbin/virtnodedevd --timeout 120
           │ ├─virtqemud.service
           │ │ └─259861 /usr/sbin/virtqemud --timeout 120
           │ └─virtsecretd.service
           │   └─267673 /usr/sbin/virtsecretd --timeout 120
           └─user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─1267 /usr/bin/python3
             │ ├─session-53.scope
             │ │ ├─308926 "sshd-session: zuul [priv]"
             │ │ ├─308929 "sshd-session: zuul@notty"
             │ │ ├─308930 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt"
             │ │ ├─308954 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─315707 timeout 15s turbostat --debug sleep 10
             │ │ ├─316407 timeout 300s systemctl status --all
             │ │ ├─316408 systemctl status --all
             │ │ ├─316409 timeout 300s semanage interface -l
             │ │ ├─316411 /usr/bin/python3 -EsI /usr/sbin/semanage interface -l
             │ │ └─316412 timeout --foreground 300s virsh -r nodedev-dumpxml block_vda
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─9030 /usr/bin/dbus-broker-launch --scope user
             │   │   └─9042 dbus-broker --log 4 --controller 9 --machine-id a1727ec20198bc6caf436a6e13c4ff5e --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─1057 /usr/lib/systemd/systemd --user
             │   │ └─1059 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-31202ca3.scope
             │       └─8928 catatonit -P
             └─user-42477.slice
               ├─session-21.scope
               │ ├─75886 "sshd-session: ceph-admin [priv]"
               │ └─75908 "sshd-session: ceph-admin"
               ├─session-23.scope
               │ ├─75893 "sshd-session: ceph-admin [priv]"
               │ └─75909 "sshd-session: ceph-admin@notty"
               ├─session-24.scope
               │ ├─75960 "sshd-session: ceph-admin [priv]"
               │ └─75963 "sshd-session: ceph-admin@notty"
               ├─session-25.scope
               │ ├─76014 "sshd-session: ceph-admin [priv]"
               │ └─76017 "sshd-session: ceph-admin@notty"
               ├─session-26.scope
               │ ├─76068 "sshd-session: ceph-admin [priv]"
               │ └─76071 "sshd-session: ceph-admin@notty"
               ├─session-27.scope
               │ ├─76122 "sshd-session: ceph-admin [priv]"
               │ └─76125 "sshd-session: ceph-admin@notty"
               ├─session-28.scope
               │ ├─76176 "sshd-session: ceph-admin [priv]"
               │ └─76179 "sshd-session: ceph-admin@notty"
               ├─session-29.scope
               │ ├─76230 "sshd-session: ceph-admin [priv]"
               │ └─76233 "sshd-session: ceph-admin@notty"
               ├─session-30.scope
               │ ├─76284 "sshd-session: ceph-admin [priv]"
               │ └─76287 "sshd-session: ceph-admin@notty"
               ├─session-31.scope
               │ ├─76338 "sshd-session: ceph-admin [priv]"
               │ └─76341 "sshd-session: ceph-admin@notty"
               ├─session-32.scope
               │ ├─76365 "sshd-session: ceph-admin [priv]"
               │ └─76368 "sshd-session: ceph-admin@notty"
               ├─session-33.scope
               │ ├─76419 "sshd-session: ceph-admin [priv]"
               │ └─76422 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                  └─init.scope
Unit boot.automount could not be found.
                   ├─75890 /usr/lib/systemd/systemd --user
                   └─75892 "(sd-pam)"

● proc-sys-fs-binfmt_misc.automount - Arbitrary Executable File Formats File System Automount Point
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.automount; static)
     Active: active (running) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
   Triggers: ● proc-sys-fs-binfmt_misc.mount
      Where: /proc/sys/fs/binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Oct 11 03:19:42 compute-0 systemd[1]: proc-sys-fs-binfmt_misc.automount: Got automount request for /proc/sys/fs/binfmt_misc, triggered by 77323 (sysctl)

● dev-cdrom.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ceph_vg0-ceph_lv0.device - /dev/ceph_vg0/ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:48 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:48 UTC; 52min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-ceph_vg1-ceph_lv1.device - /dev/ceph_vg1/ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:53 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:53 UTC; 52min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-ceph_vg2-ceph_lv2.device - /dev/ceph_vg2/ceph_lv2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:58 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:58 UTC; 52min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2ddiskseq-1.device - /dev/disk/by-diskseq/1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2ddiskseq-11.device - /dev/disk/by-diskseq/11
    Follows: unit currently follows state of sys-devices-virtual-block-loop5.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:58 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:58 UTC; 52min ago
     Device: /sys/devices/virtual/block/loop5

● dev-disk-by\x2ddiskseq-12.device - /dev/disk/by-diskseq/12
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:58 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:58 UTC; 52min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2ddiskseq-3.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2ddiskseq-5.device - /dev/disk/by-diskseq/5
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:48 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:48 UTC; 52min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2ddiskseq-6.device - /dev/disk/by-diskseq/6
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:48 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:48 UTC; 52min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2ddiskseq-8.device - /dev/disk/by-diskseq/8
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:53 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:53 UTC; 52min ago
     Device: /sys/devices/virtual/block/loop4

● dev-disk-by\x2ddiskseq-9.device - /dev/disk/by-diskseq/9
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:53 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:53 UTC; 52min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-ata\x2dQEMU_DVD\x2dROM_QM00001.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg0\x2dceph_lv0.device - /dev/disk/by-id/dm-name-ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:48 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:48 UTC; 52min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg1\x2dceph_lv1.device - /dev/disk/by-id/dm-name-ceph_vg1-ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:53 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:53 UTC; 52min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg2\x2dceph_lv2.device - /dev/disk/by-id/dm-name-ceph_vg2-ceph_lv2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:58 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:58 UTC; 52min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2dbLn778ASNIB3CaZQr8dFfz38Vm2s8FbeTkjgGDB7rJDOqO7Nrb56OYGrPKLhMT7w.device - /dev/disk/by-id/dm-uuid-LVM-bLn778ASNIB3CaZQr8dFfz38Vm2s8FbeTkjgGDB7rJDOqO7Nrb56OYGrPKLhMT7w
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:48 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:48 UTC; 52min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2dBRXqrKXirUtXUef0x8F1gxdt6gFwAcJlGduJypMYzkOG0E1pmcDFS8D4MGC6qmeN.device - /dev/disk/by-id/dm-uuid-LVM-BRXqrKXirUtXUef0x8F1gxdt6gFwAcJlGduJypMYzkOG0E1pmcDFS8D4MGC6qmeN
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:58 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:58 UTC; 52min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2ddeYVRwzG5Y7n2S2AocQFdwieuzBnRMEakdDBXaHGt0oIJ28SFk0yLWXlpXTisLKy.device - /dev/disk/by-id/dm-uuid-LVM-deYVRwzG5Y7n2S2AocQFdwieuzBnRMEakdDBXaHGt0oIJ28SFk0yLWXlpXTisLKy
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:53 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:53 UTC; 52min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2d6epEVN\x2dancT\x2dxKoc\x2dyqxR\x2drxEc\x2dyIpG\x2dWEBFtN.device - /dev/disk/by-id/lvm-pv-uuid-6epEVN-ancT-xKoc-yqxR-rxEc-yIpG-WEBFtN
    Follows: unit currently follows state of sys-devices-virtual-block-loop5.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:58 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:58 UTC; 52min ago
     Device: /sys/devices/virtual/block/loop5

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dglcYbb\x2dndRV\x2d6zlo\x2d87hu\x2dCGMA\x2duvEC\x2dchY2vk.device - /dev/disk/by-id/lvm-pv-uuid-glcYbb-ndRV-6zlo-87hu-CGMA-uvEC-chY2vk
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:53 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:53 UTC; 52min ago
     Device: /sys/devices/virtual/block/loop4

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dglgwx5\x2dUapw\x2dkSpZ\x2dsRqR\x2d8tb0\x2dQT5x\x2dv8w43U.device - /dev/disk/by-id/lvm-pv-uuid-glgwx5-Uapw-kSpZ-sRqR-8tb0-QT5x-v8w43U
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:48 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:48 UTC; 52min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2dlabel-config\x2d2.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpartuuid-67192416\x2d01.device - /dev/disk/by-partuuid/67192416-01
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0.device - /dev/disk/by-path/pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0.device - /dev/disk/by-path/virtio-pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/virtio-pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2duuid-2025\x2d10\x2d11\x2d02\x2d42\x2d03\x2d00.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2duuid-9839e2e1\x2d98a2\x2d4594\x2db609\x2d79d514deb0a3.device - /dev/disk/by-uuid/9839e2e1-98a2-4594-b609-79d514deb0a3
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

Oct 11 02:42:15 localhost systemd[1]: Found device /dev/disk/by-uuid/9839e2e1-98a2-4594-b609-79d514deb0a3.

● dev-dm\x2d0.device - /dev/dm-0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:48 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:48 UTC; 52min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-dm\x2d1.device - /dev/dm-1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:53 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:53 UTC; 52min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-dm\x2d2.device - /dev/dm-2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:58 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:58 UTC; 52min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-loop3.device - /dev/loop3
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:47 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:47 UTC; 52min ago
     Device: /sys/devices/virtual/block/loop3

● dev-loop4.device - /dev/loop4
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:53 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:53 UTC; 52min ago
     Device: /sys/devices/virtual/block/loop4

● dev-loop5.device - /dev/loop5
    Follows: unit currently follows state of sys-devices-virtual-block-loop5.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:57 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:57 UTC; 52min ago
     Device: /sys/devices/virtual/block/loop5

● dev-mapper-ceph_vg0\x2dceph_lv0.device - /dev/mapper/ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:48 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:48 UTC; 52min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-mapper-ceph_vg1\x2dceph_lv1.device - /dev/mapper/ceph_vg1-ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:53 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:53 UTC; 52min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-mapper-ceph_vg2\x2dceph_lv2.device - /dev/mapper/ceph_vg2-ceph_lv2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:58 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:58 UTC; 52min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-rfkill.device - /dev/rfkill
    Follows: unit currently follows state of sys-devices-virtual-misc-rfkill.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:19 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:19 UTC; 1h 28min ago
     Device: /sys/devices/virtual/misc/rfkill

● dev-sr0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ttyS0.device - /dev/ttyS0
    Follows: unit currently follows state of sys-devices-pnp0-00:00-tty-ttyS0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

Oct 11 02:42:18 localhost systemd[1]: Condition check resulted in /dev/ttyS0 being skipped.

● dev-ttyS1.device - /dev/ttyS1
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● dev-ttyS2.device - /dev/ttyS2
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS2.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● dev-ttyS3.device - /dev/ttyS3
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS3.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● dev-vda.device - /dev/vda
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-vda1.device - /dev/vda1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device - QEMU_DVD-ROM config-2
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● sys-devices-pci0000:00-0000:00:03.0-virtio1-net-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● sys-devices-pci0000:00-0000:00:07.0-virtio5-net-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:44:15 UTC; 1h 26min ago
      Until: Sat 2025-10-11 02:44:15 UTC; 1h 26min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-devices-platform-serial8250-tty-ttyS1.device - /sys/devices/platform/serial8250/tty/ttyS1
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● sys-devices-platform-serial8250-tty-ttyS2.device - /sys/devices/platform/serial8250/tty/ttyS2
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● sys-devices-platform-serial8250-tty-ttyS3.device - /sys/devices/platform/serial8250/tty/ttyS3
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● sys-devices-pnp0-00:00-tty-ttyS0.device - /sys/devices/pnp0/00:00/tty/ttyS0
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

● sys-devices-virtual-block-dm\x2d0.device - /sys/devices/virtual/block/dm-0
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:48 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:48 UTC; 52min ago
     Device: /sys/devices/virtual/block/dm-0

● sys-devices-virtual-block-dm\x2d1.device - /sys/devices/virtual/block/dm-1
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:53 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:53 UTC; 52min ago
     Device: /sys/devices/virtual/block/dm-1

● sys-devices-virtual-block-dm\x2d2.device - /sys/devices/virtual/block/dm-2
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:58 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:58 UTC; 52min ago
     Device: /sys/devices/virtual/block/dm-2

● sys-devices-virtual-block-loop3.device - /sys/devices/virtual/block/loop3
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:47 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:47 UTC; 52min ago
     Device: /sys/devices/virtual/block/loop3

● sys-devices-virtual-block-loop4.device - /sys/devices/virtual/block/loop4
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:53 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:53 UTC; 52min ago
     Device: /sys/devices/virtual/block/loop4

● sys-devices-virtual-block-loop5.device - /sys/devices/virtual/block/loop5
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:17:57 UTC; 52min ago
      Until: Sat 2025-10-11 03:17:57 UTC; 52min ago
     Device: /sys/devices/virtual/block/loop5

● sys-devices-virtual-misc-rfkill.device - /sys/devices/virtual/misc/rfkill
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:19 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:19 UTC; 1h 28min ago
     Device: /sys/devices/virtual/misc/rfkill

● sys-devices-virtual-net-br\x2dex.device - /sys/devices/virtual/net/br-ex
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:12:05 UTC; 58min ago
      Until: Sat 2025-10-11 03:12:05 UTC; 58min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-devices-virtual-net-br\x2dint.device - /sys/devices/virtual/net/br-int
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:30:03 UTC; 40min ago
      Until: Sat 2025-10-11 03:30:03 UTC; 40min ago
     Device: /sys/devices/virtual/net/br-int

● sys-devices-virtual-net-genev_sys_6081.device - /sys/devices/virtual/net/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:30:03 UTC; 40min ago
      Until: Sat 2025-10-11 03:30:03 UTC; 40min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-devices-virtual-net-ovs\x2dsystem.device - /sys/devices/virtual/net/ovs-system
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:12:05 UTC; 58min ago
      Until: Sat 2025-10-11 03:12:05 UTC; 58min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-devices-virtual-net-vlan20.device - /sys/devices/virtual/net/vlan20
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:12:05 UTC; 58min ago
      Until: Sat 2025-10-11 03:12:05 UTC; 58min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-devices-virtual-net-vlan21.device - /sys/devices/virtual/net/vlan21
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:12:05 UTC; 58min ago
      Until: Sat 2025-10-11 03:12:05 UTC; 58min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-devices-virtual-net-vlan22.device - /sys/devices/virtual/net/vlan22
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:12:05 UTC; 58min ago
      Until: Sat 2025-10-11 03:12:05 UTC; 58min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-devices-virtual-net-vlan23.device - /sys/devices/virtual/net/vlan23
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:12:05 UTC; 58min ago
      Until: Sat 2025-10-11 03:12:05 UTC; 58min ago
     Device: /sys/devices/virtual/net/vlan23

● sys-module-configfs.device - /sys/module/configfs
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/module/configfs

● sys-module-fuse.device - /sys/module/fuse
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/module/fuse

● sys-subsystem-net-devices-br\x2dex.device - /sys/subsystem/net/devices/br-ex
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:12:05 UTC; 58min ago
      Until: Sat 2025-10-11 03:12:05 UTC; 58min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-subsystem-net-devices-br\x2dint.device - /sys/subsystem/net/devices/br-int
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:30:03 UTC; 40min ago
      Until: Sat 2025-10-11 03:30:03 UTC; 40min ago
     Device: /sys/devices/virtual/net/br-int

● sys-subsystem-net-devices-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-subsystem-net-devices-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 02:44:15 UTC; 1h 26min ago
      Until: Sat 2025-10-11 02:44:15 UTC; 1h 26min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-subsystem-net-devices-genev_sys_6081.device - /sys/subsystem/net/devices/genev_sys_6081
Unit boot.mount could not be found.
Unit home.mount could not be found.
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:30:03 UTC; 40min ago
      Until: Sat 2025-10-11 03:30:03 UTC; 40min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-subsystem-net-devices-ovs\x2dsystem.device - /sys/subsystem/net/devices/ovs-system
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:12:05 UTC; 58min ago
      Until: Sat 2025-10-11 03:12:05 UTC; 58min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-subsystem-net-devices-vlan20.device - /sys/subsystem/net/devices/vlan20
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:12:05 UTC; 58min ago
      Until: Sat 2025-10-11 03:12:05 UTC; 58min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-subsystem-net-devices-vlan21.device - /sys/subsystem/net/devices/vlan21
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:12:05 UTC; 58min ago
      Until: Sat 2025-10-11 03:12:05 UTC; 58min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-subsystem-net-devices-vlan22.device - /sys/subsystem/net/devices/vlan22
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:12:05 UTC; 58min ago
      Until: Sat 2025-10-11 03:12:05 UTC; 58min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-subsystem-net-devices-vlan23.device - /sys/subsystem/net/devices/vlan23
     Loaded: loaded
     Active: active (plugged) since Sat 2025-10-11 03:12:05 UTC; 58min ago
      Until: Sat 2025-10-11 03:12:05 UTC; 58min ago
     Device: /sys/devices/virtual/net/vlan23

● -.mount - Root Mount
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted)
      Where: /
       What: /dev/vda1
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages.mount - Huge Pages File System
     Loaded: loaded (/usr/lib/systemd/system/dev-hugepages.mount; static)
     Active: active (mounted) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Where: /dev/hugepages
       What: hugetlbfs
       Docs: https://docs.kernel.org/admin-guide/mm/hugetlbpage.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 52.0K (peak: 556.0K)
        CPU: 6ms
     CGroup: /dev-hugepages.mount

● dev-hugepages1G.mount - /dev/hugepages1G
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Sat 2025-10-11 03:16:48 UTC; 53min ago
      Until: Sat 2025-10-11 03:16:48 UTC; 53min ago
      Where: /dev/hugepages1G
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages2M.mount - /dev/hugepages2M
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Sat 2025-10-11 03:16:49 UTC; 53min ago
      Until: Sat 2025-10-11 03:16:49 UTC; 53min ago
      Where: /dev/hugepages2M
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-mqueue.mount - POSIX Message Queue File System
     Loaded: loaded (/usr/lib/systemd/system/dev-mqueue.mount; static)
     Active: active (mounted) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Where: /dev/mqueue
       What: mqueue
       Docs: man:mq_overview(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 4.0K (peak: 540.0K)
        CPU: 3ms
     CGroup: /dev-mqueue.mount

○ proc-fs-nfsd.mount - NFSD configuration filesystem
     Loaded: loaded (/usr/lib/systemd/system/proc-fs-nfsd.mount; static)
     Active: inactive (dead)
      Where: /proc/fs/nfsd
       What: nfsd

● proc-sys-fs-binfmt_misc.mount - Arbitrary Executable File Formats File System
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.mount; disabled; preset: disabled)
     Active: active (mounted) since Sat 2025-10-11 03:19:42 UTC; 50min ago
      Until: Sat 2025-10-11 03:19:42 UTC; 50min ago
TriggeredBy: ● proc-sys-fs-binfmt_misc.automount
      Where: /proc/sys/fs/binfmt_misc
       What: binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 8.0K (peak: 544.0K)
        CPU: 8ms
     CGroup: /proc-sys-fs-binfmt_misc.mount

Oct 11 03:19:42 compute-0 systemd[1]: Mounting Arbitrary Executable File Formats File System...
Oct 11 03:19:42 compute-0 systemd[1]: Mounted Arbitrary Executable File Formats File System.

● run-credentials-systemd\x2dsysctl.service.mount - /run/credentials/systemd-sysctl.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 03:10:55 UTC; 59min ago
      Until: Sat 2025-10-11 03:10:55 UTC; 59min ago
      Where: /run/credentials/systemd-sysctl.service
       What: none

● run-credentials-systemd\x2dsysusers.service.mount - /run/credentials/systemd-sysusers.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Where: /run/credentials/systemd-sysusers.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup.service.mount - /run/credentials/systemd-tmpfiles-setup.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Where: /run/credentials/systemd-tmpfiles-setup.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup\x2ddev.service.mount - /run/credentials/systemd-tmpfiles-setup-dev.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Where: /run/credentials/systemd-tmpfiles-setup-dev.service
       What: none

● run-netns.mount - /run/netns
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 03:14:56 UTC; 55min ago
      Until: Sat 2025-10-11 03:14:56 UTC; 55min ago
      Where: /run/netns
       What: tmpfs

● run-user-1000.mount - /run/user/1000
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 02:42:35 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:35 UTC; 1h 28min ago
      Where: /run/user/1000
       What: tmpfs

● run-user-42477.mount - /run/user/42477
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 03:19:33 UTC; 51min ago
      Until: Sat 2025-10-11 03:19:33 UTC; 51min ago
      Where: /run/user/42477
       What: tmpfs

● sys-fs-fuse-connections.mount - FUSE Control File System
     Loaded: loaded (/usr/lib/systemd/system/sys-fs-fuse-connections.mount; static)
     Active: active (mounted) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Where: /sys/fs/fuse/connections
       What: fusectl
       Docs: https://docs.kernel.org/filesystems/fuse.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 4.0K (peak: 540.0K)
        CPU: 3ms
     CGroup: /sys-fs-fuse-connections.mount

Oct 11 02:42:17 localhost systemd[1]: Mounting FUSE Control File System...
Oct 11 02:42:17 localhost systemd[1]: Mounted FUSE Control File System.

● sys-kernel-config.mount - Kernel Configuration File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Where: /sys/kernel/config
       What: configfs
       Docs: https://docs.kernel.org/filesystems/configfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Unit sysroot.mount could not be found.
● sys-kernel-debug-tracing.mount - /sys/kernel/debug/tracing
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 04:09:56 UTC; 41s ago
      Until: Sat 2025-10-11 04:09:56 UTC; 41s ago
      Where: /sys/kernel/debug/tracing
       What: tracefs

● sys-kernel-debug.mount - Kernel Debug File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-debug.mount; static)
     Active: active (mounted) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Where: /sys/kernel/debug
       What: debugfs
       Docs: https://docs.kernel.org/filesystems/debugfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 4.0K (peak: 540.0K)
        CPU: 2ms
     CGroup: /sys-kernel-debug.mount

● sys-kernel-tracing.mount - Kernel Trace File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-tracing.mount; static)
     Active: active (mounted) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Where: /sys/kernel/tracing
       What: tracefs
       Docs: https://docs.kernel.org/trace/ftrace.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 4.0K (peak: 540.0K)
        CPU: 5ms
     CGroup: /sys-kernel-tracing.mount

○ tmp.mount - Temporary Directory /tmp
     Loaded: loaded (/usr/lib/systemd/system/tmp.mount; disabled; preset: disabled)
     Active: inactive (dead)
      Where: /tmp
       What: tmpfs
       Docs: https://systemd.io/TEMPORARY_DIRECTORIES
             man:file-hierarchy(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● var-lib-containers-storage-overlay-221faf47a3aac534811827a618c1a1e4d780f88ba77355c91c1a609fc5a29146-merged.mount - /var/lib/containers/storage/overlay/221faf47a3aac534811827a618c1a1e4d780f88ba77355c91c1a609fc5a29146/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 03:30:02 UTC; 40min ago
      Until: Sat 2025-10-11 03:30:02 UTC; 40min ago
      Where: /var/lib/containers/storage/overlay/221faf47a3aac534811827a618c1a1e4d780f88ba77355c91c1a609fc5a29146/merged
       What: overlay

● var-lib-containers-storage-overlay-4100cf42122ec3830809bd529ebb34e2cc0afc34e1a230e4e71c8a71502941b8-merged.mount - /var/lib/containers/storage/overlay/4100cf42122ec3830809bd529ebb34e2cc0afc34e1a230e4e71c8a71502941b8/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 03:20:38 UTC; 49min ago
      Until: Sat 2025-10-11 03:20:38 UTC; 49min ago
      Where: /var/lib/containers/storage/overlay/4100cf42122ec3830809bd529ebb34e2cc0afc34e1a230e4e71c8a71502941b8/merged
       What: overlay

● var-lib-containers-storage-overlay-4b7797612072be3e382f1201758259cdadf087660dcd7a1269c4ba7c8e105662-merged.mount - /var/lib/containers/storage/overlay/4b7797612072be3e382f1201758259cdadf087660dcd7a1269c4ba7c8e105662/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 03:31:08 UTC; 39min ago
      Until: Sat 2025-10-11 03:31:08 UTC; 39min ago
      Where: /var/lib/containers/storage/overlay/4b7797612072be3e382f1201758259cdadf087660dcd7a1269c4ba7c8e105662/merged
       What: overlay

● var-lib-containers-storage-overlay-5ab7257e1e85cd1f6c4036bb37443e36f37b063e8b3d62e0aa6943aa2aa39de8-merged.mount - /var/lib/containers/storage/overlay/5ab7257e1e85cd1f6c4036bb37443e36f37b063e8b3d62e0aa6943aa2aa39de8/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 03:18:38 UTC; 51min ago
      Until: Sat 2025-10-11 03:18:38 UTC; 51min ago
      Where: /var/lib/containers/storage/overlay/5ab7257e1e85cd1f6c4036bb37443e36f37b063e8b3d62e0aa6943aa2aa39de8/merged
       What: overlay

● var-lib-containers-storage-overlay-839842863fe9685fced44157e6b32dd25244cef8d08ff70db7eadf72355d8e35-merged.mount - /var/lib/containers/storage/overlay/839842863fe9685fced44157e6b32dd25244cef8d08ff70db7eadf72355d8e35/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 03:21:52 UTC; 48min ago
      Until: Sat 2025-10-11 03:21:52 UTC; 48min ago
      Where: /var/lib/containers/storage/overlay/839842863fe9685fced44157e6b32dd25244cef8d08ff70db7eadf72355d8e35/merged
       What: overlay

● var-lib-containers-storage-overlay-a08d0c353c41da7206b42681633aec861b88746aeb6dcadae20d769749654ae1-merged.mount - /var/lib/containers/storage/overlay/a08d0c353c41da7206b42681633aec861b88746aeb6dcadae20d769749654ae1/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 03:18:40 UTC; 51min ago
      Until: Sat 2025-10-11 03:18:40 UTC; 51min ago
      Where: /var/lib/containers/storage/overlay/a08d0c353c41da7206b42681633aec861b88746aeb6dcadae20d769749654ae1/merged
       What: overlay

● var-lib-containers-storage-overlay-acba10e39aee7f7b2979802f8bcc6e90e8eb3ccaf2f6e3ef02735f2c95ae27fc-merged.mount - /var/lib/containers/storage/overlay/acba10e39aee7f7b2979802f8bcc6e90e8eb3ccaf2f6e3ef02735f2c95ae27fc/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 03:36:30 UTC; 34min ago
      Until: Sat 2025-10-11 03:36:30 UTC; 34min ago
      Where: /var/lib/containers/storage/overlay/acba10e39aee7f7b2979802f8bcc6e90e8eb3ccaf2f6e3ef02735f2c95ae27fc/merged
       What: overlay

● var-lib-containers-storage-overlay-c42a58e981976c6d5b8580b48c1b06d786bf18347af19da33bdd3fab46f24c94-merged.mount - /var/lib/containers/storage/overlay/c42a58e981976c6d5b8580b48c1b06d786bf18347af19da33bdd3fab46f24c94/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 03:39:20 UTC; 31min ago
      Until: Sat 2025-10-11 03:39:20 UTC; 31min ago
      Where: /var/lib/containers/storage/overlay/c42a58e981976c6d5b8580b48c1b06d786bf18347af19da33bdd3fab46f24c94/merged
       What: overlay

● var-lib-containers-storage-overlay-c6dad0688fbe1c621afae846f1fc01dab43648014e323f8e599f101f39177a53-merged.mount - /var/lib/containers/storage/overlay/c6dad0688fbe1c621afae846f1fc01dab43648014e323f8e599f101f39177a53/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 03:20:28 UTC; 50min ago
      Until: Sat 2025-10-11 03:20:28 UTC; 50min ago
      Where: /var/lib/containers/storage/overlay/c6dad0688fbe1c621afae846f1fc01dab43648014e323f8e599f101f39177a53/merged
       What: overlay

● var-lib-containers-storage-overlay-d198f75a95ab803013afde8e75ee77508ca10e5ba92e21cc8d8e1b484bdd6d39-merged.mount - /var/lib/containers/storage/overlay/d198f75a95ab803013afde8e75ee77508ca10e5ba92e21cc8d8e1b484bdd6d39/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 03:19:55 UTC; 50min ago
      Until: Sat 2025-10-11 03:19:55 UTC; 50min ago
      Where: /var/lib/containers/storage/overlay/d198f75a95ab803013afde8e75ee77508ca10e5ba92e21cc8d8e1b484bdd6d39/merged
       What: overlay

● var-lib-containers-storage-overlay-daf1984d7f448edcb8e44e3fc0fa0ef8458bf44e8ae8bf80b781f24b3a332d6b-merged.mount - /var/lib/containers/storage/overlay/daf1984d7f448edcb8e44e3fc0fa0ef8458bf44e8ae8bf80b781f24b3a332d6b/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 03:37:25 UTC; 33min ago
      Until: Sat 2025-10-11 03:37:25 UTC; 33min ago
      Where: /var/lib/containers/storage/overlay/daf1984d7f448edcb8e44e3fc0fa0ef8458bf44e8ae8bf80b781f24b3a332d6b/merged
       What: overlay

● var-lib-containers-storage-overlay-eaba459bb001adaae8069b1b44a0c2a1c06efc482179810a627870a4dc78e63e-merged.mount - /var/lib/containers/storage/overlay/eaba459bb001adaae8069b1b44a0c2a1c06efc482179810a627870a4dc78e63e/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 03:21:50 UTC; 48min ago
      Until: Sat 2025-10-11 03:21:50 UTC; 48min ago
      Where: /var/lib/containers/storage/overlay/eaba459bb001adaae8069b1b44a0c2a1c06efc482179810a627870a4dc78e63e/merged
       What: overlay

● var-lib-containers-storage-overlay-f1bb59bd4b28a8d28c79be83607365e79659d0ec269b9b1be7322faaf13c1152-merged.mount - /var/lib/containers/storage/overlay/f1bb59bd4b28a8d28c79be83607365e79659d0ec269b9b1be7322faaf13c1152/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 03:20:33 UTC; 50min ago
      Until: Sat 2025-10-11 03:20:33 UTC; 50min ago
      Where: /var/lib/containers/storage/overlay/f1bb59bd4b28a8d28c79be83607365e79659d0ec269b9b1be7322faaf13c1152/merged
       What: overlay

● var-lib-containers-storage-overlay.mount - /var/lib/containers/storage/overlay
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 03:18:37 UTC; 51min ago
      Until: Sat 2025-10-11 03:18:37 UTC; 51min ago
      Where: /var/lib/containers/storage/overlay
       What: /dev/vda1

● var-lib-containers-storage-overlay\x2dcontainers-26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 03:39:20 UTC; 31min ago
      Until: Sat 2025-10-11 03:39:20 UTC; 31min ago
      Where: /var/lib/containers/storage/overlay-containers/26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 03:30:02 UTC; 40min ago
      Until: Sat 2025-10-11 03:30:02 UTC; 40min ago
      Where: /var/lib/containers/storage/overlay-containers/65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 03:37:25 UTC; 33min ago
      Until: Sat 2025-10-11 03:37:25 UTC; 33min ago
      Where: /var/lib/containers/storage/overlay-containers/8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 03:31:08 UTC; 39min ago
      Until: Sat 2025-10-11 03:31:08 UTC; 39min ago
      Where: /var/lib/containers/storage/overlay-containers/dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2025-10-11 03:36:30 UTC; 34min ago
      Until: Sat 2025-10-11 03:36:30 UTC; 34min ago
      Where: /var/lib/containers/storage/overlay-containers/f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d/userdata/shm
       What: shm

○ var-lib-machines.mount - Virtual Machine and Container Storage (Compatibility)
     Loaded: loaded (/usr/lib/systemd/system/var-lib-machines.mount; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-10-11 03:34:58 UTC; 35min ago
      Where: /var/lib/machines
       What: /var/lib/machines.raw

Oct 11 03:34:58 compute-0 systemd[1]: Virtual Machine and Container Storage (Compatibility) was skipped because of an unmet condition check (ConditionPathExists=/var/lib/machines.raw).

● var-lib-nfs-rpc_pipefs.mount - RPC Pipe File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Where: /var/lib/nfs/rpc_pipefs
       What: rpc_pipefs

● systemd-ask-password-console.path - Dispatch Password Requests to Console Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.path; static)
     Active: active (waiting) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
   Triggers: ● systemd-ask-password-console.service
       Docs: man:systemd-ask-password-console.path(8)

● systemd-ask-password-wall.path - Forward Password Requests to Wall Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.path; static)
     Active: active (waiting) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
   Triggers: ● systemd-ask-password-wall.service
       Docs: man:systemd-ask-password-wall.path(8)

● init.scope - System and Service Manager
     Loaded: loaded
  Transient: yes
     Active: active (running) since Sat 2025-10-11 02:42:14 UTC; 1h 28min ago
       Docs: man:systemd(1)
         IO: 1.0M read, 0B written
      Tasks: 1 (limit: 48573)
     Memory: 49.9M (peak: 68.6M)
        CPU: 1min 24.343s
     CGroup: /init.scope
             └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31

Oct 11 04:10:13 compute-0 systemd[1]: var-lib-containers-storage-overlay-56d3d552db340baac6a394077bbacfc91f51a67986c050196cbb8d418b04db91-merged.mount: Deactivated successfully.
Oct 11 04:10:13 compute-0 systemd[1]: libpod-conmon-c7a69b0f84f7e1cb8da3482d9ea77ad193006b4ccbc5f02682485a30ca6c3590.scope: Deactivated successfully.
Oct 11 04:10:14 compute-0 systemd[1]: Started libpod-conmon-62934323c7dacd31d940284e1901d7f46aeff7a5d1c2a6cf475415517bf1f78a.scope.
Oct 11 04:10:14 compute-0 systemd[1]: Started libcrun container.
Oct 11 04:10:15 compute-0 systemd[1]: libpod-62934323c7dacd31d940284e1901d7f46aeff7a5d1c2a6cf475415517bf1f78a.scope: Deactivated successfully.
Oct 11 04:10:15 compute-0 systemd[1]: libpod-62934323c7dacd31d940284e1901d7f46aeff7a5d1c2a6cf475415517bf1f78a.scope: Consumed 1.046s CPU time.
Oct 11 04:10:15 compute-0 systemd[1]: var-lib-containers-storage-overlay-e43d32d0f8addaec55941d7134f63f063e3d3e2e76f13a81d74e89dd50ddf381-merged.mount: Deactivated successfully.
Oct 11 04:10:15 compute-0 systemd[1]: libpod-conmon-62934323c7dacd31d940284e1901d7f46aeff7a5d1c2a6cf475415517bf1f78a.scope: Deactivated successfully.
Oct 11 04:10:16 compute-0 systemd[1]: Starting Hostname Service...
Oct 11 04:10:16 compute-0 systemd[1]: Started Hostname Service.

● libpod-26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-10-11 03:39:20 UTC; 31min ago
         IO: 42.0M read, 42.2M written
      Tasks: 29 (limit: 4096)
     Memory: 467.8M (peak: 558.6M)
        CPU: 2min 28.428s
     CGroup: /machine.slice/libpod-26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1.scope
             └─container
               ├─260091 dumb-init --single-child -- kolla_start
               ├─260093 /usr/bin/python3 /usr/bin/nova-compute
               ├─267479 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmpqv15d5ht/privsep.sock
               ├─268186 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmplcy3_gw7/privsep.sock
               └─268305 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmpb19gkq7f/privsep.sock

Oct 11 03:39:20 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:46:35 compute-0 systemd-coredump[268327]: Process 268307 (qemu-img) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 767:
                                                    #0  0x00007f065dd3003c __pthread_kill_implementation (libc.so.6 + 0x8d03c)
                                                    #1  0x00007f065dce2b86 raise (libc.so.6 + 0x3fb86)
                                                    #2  0x00007f065dccc873 abort (libc.so.6 + 0x29873)
                                                    #3  0x00005636f4af556f ___interceptor_pthread_create (qemu-img + 0x4e56f)
                                                    #4  0x00007f065af06ff4 _ZN6Thread10try_createEm (libceph-common.so.2 + 0x258ff4)
                                                    #5  0x00007f065af096ae _ZN6Thread6createEPKcm (libceph-common.so.2 + 0x25b6ae)
                                                    #6  0x00007f065be1026b _ZNSt8_Rb_treeISt4pairINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESt10type_indexES0_IKS8_N4ceph12immobile_anyILm576EEEESt10_Select1stISD_ENSA_6common11CephContext19associated_objs_cmpESaISD_EE22_M_emplace_hint_uniqueIJRKSt21piecewise_construct_tSt5tupleIJRSt17basic_string_viewIcS4_ERS7_EESP_IJRKSt15in_place_type_tIN6librbd21TaskFinisherSingletonEERPSH_EEEEESt17_Rb_tree_iteratorISD_ESt23_Rb_tree_const_iteratorISD_EDpOT_.constprop.0 (librbd.so.1 + 0x51126b)
                                                    #7  0x00007f065ba3d7a6 _ZN6librbd8ImageCtx4initEv (librbd.so.1 + 0x13e7a6)
                                                    #8  0x00007f065bb172d3 _ZN6librbd5image11OpenRequestINS_8ImageCtxEE12send_refreshEv (librbd.so.1 + 0x2182d3)
                                                    #9  0x00007f065bb17f46 _ZN6librbd5image11OpenRequestINS_8ImageCtxEE23handle_v2_get_data_poolEPi (librbd.so.1 + 0x218f46)
                                                    #10 0x00007f065bb182a7 _ZN6librbd4util6detail20rados_state_callbackINS_5image11OpenRequestINS_8ImageCtxEEEXadL_ZNS6_23handle_v2_get_data_poolEPiEELb1EEEvPvS8_ (librbd.so.1 + 0x2192a7)
                                                    #11 0x00007f065b8160ac _ZN5boost4asio6detail18completion_handlerINS1_7binder0IN8librados14CB_AioCompleteEEENS0_10io_context19basic_executor_typeISaIvELm0EEEE11do_completeEPvPNS1_19scheduler_operationERKNS_6system10error_codeEm (librados.so.2 + 0xad0ac)
                                                    #12 0x00007f065b815585 _ZN5boost4asio6detail14strand_service11do_completeEPvPNS1_19scheduler_operationERKNS_6system10error_codeEm (librados.so.2 + 0xac585)
                                                    #13 0x00007f065b890498 _ZN5boost4asio6detail9scheduler3runERNS_6system10error_codeE.constprop.0.isra.0 (librados.so.2 + 0x127498)
                                                    #14 0x00007f065b82f4e4 _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJZ17make_named_threadIZN4ceph5async15io_context_pool5startEsEUlvE_JEES_St17basic_string_viewIcSt11char_traitsIcEEOT_DpOT0_EUlSD_SG_E_S7_EEEEE6_M_runEv (librados.so.2 + 0xc64e4)
                                                    #15 0x00007f065a59dae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #16 0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #17 0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 757:
                                                    #0  0x00007f065dd2b38a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f065dd2d8e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f065a5976c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f065ba44eb3 _ZN6librbd10ImageStateINS_8ImageCtxEE4openEm (librbd.so.1 + 0x145eb3)
                                                    #4  0x00007f065ba14fcb rbd_open (librbd.so.1 + 0x115fcb)
                                                    #5  0x00007f065bfbf89d qemu_rbd_open (block-rbd.so + 0x489d)
                                                    #6  0x00005636f4b05e4c bdrv_open_driver.llvm.6332234179151191066 (qemu-img + 0x5ee4c)
                                                    #7  0x00005636f4b0ab6b bdrv_open_inherit.llvm.6332234179151191066 (qemu-img + 0x63b6b)
                                                    #8  0x00005636f4b175ce bdrv_open_child_bs.llvm.6332234179151191066 (qemu-img + 0x705ce)
                                                    #9  0x00005636f4b0a396 bdrv_open_inherit.llvm.6332234179151191066 (qemu-img + 0x63396)
                                                    #10 0x00005636f4b381f5 blk_new_open (qemu-img + 0x911f5)
                                                    #11 0x00005636f4bf3e16 img_open_file (qemu-img + 0x14ce16)
                                                    #12 0x00005636f4bf39e0 img_open (qemu-img + 0x14c9e0)
                                                    #13 0x00005636f4befc1d img_info (qemu-img + 0x148c1d)
                                                    #14 0x00005636f4be9638 main (qemu-img + 0x142638)
                                                    #15 0x00007f065dccd610 __libc_start_call_main (libc.so.6 + 0x2a610)
                                                    #16 0x00007f065dccd6c0 __libc_start_main@@GLIBC_2.34 (libc.so.6 + 0x2a6c0)
                                                    #17 0x00005636f4af5215 _start (qemu-img + 0x4e215)
                                                    
                                                    Stack trace of thread 758:
                                                    #0  0x00007f065ddab96d syscall (libc.so.6 + 0x10896d)
                                                    #1  0x00005636f4c74f73 qemu_event_wait (qemu-img + 0x1cdf73)
                                                    #2  0x00005636f4c81f87 call_rcu_thread (qemu-img + 0x1daf87)
                                                    #3  0x00005636f4c752ba qemu_thread_start.llvm.7701297430486814853 (qemu-img + 0x1ce2ba)
                                                    #4  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #5  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 761:
                                                    #0  0x00007f065ddb2b7e epoll_wait (libc.so.6 + 0x10fb7e)
                                                    #1  0x00007f065b0ee618 _ZN11EpollDriver10event_waitERSt6vectorI14FiredFileEventSaIS1_EEP7timeval (libceph-common.so.2 + 0x440618)
                                                    #2  0x00007f065b0ec702 _ZN11EventCenter14process_eventsEjPNSt6chrono8durationImSt5ratioILl1ELl1000000000EEEE (libceph-common.so.2 + 0x43e702)
                                                    #3  0x00007f065b0ed2c6 _ZNSt17_Function_handlerIFvvEZN12NetworkStack10add_threadEP6WorkerEUlvE_E9_M_invokeERKSt9_Any_data (libceph-common.so.2 + 0x43f2c6)
                                                    #4  0x00007f065a59dae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #5  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 759:
                                                    #0  0x00007f065dd2b38a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f065dd2d8e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f065a5976c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f065b1190a2 _ZN4ceph7logging3Log5entryEv (libceph-common.so.2 + 0x46b0a2)
                                                    #4  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #5  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 769:
                                                    #0  0x00007f065dd2b38a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f065dd2dcc0 pthread_cond_clockwait@GLIBC_2.30 (libc.so.6 + 0x8acc0)
                                                    #2  0x00007f065b868364 _ZN4ceph5timerINS_17coarse_mono_clockEE12timer_threadEv (librados.so.2 + 0xff364)
                                                    #3  0x00007f065a59dae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #4  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #5  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 771:
                                                    #0  0x00007f065dd2b38a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f065dd2d8e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f065a5976c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f065b0150b9 _ZN13DispatchQueue18run_local_deliveryEv (libceph-common.so.2 + 0x3670b9)
                                                    #4  0x00007f065b0a6431 _ZN13DispatchQueue19LocalDeliveryThread5entryEv (libceph-common.so.2 + 0x3f8431)
                                                    #5  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 766:
                                                    #0  0x00007f065dd2b38a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f065dd2dcc0 pthread_cond_clockwait@GLIBC_2.30 (libc.so.6 + 0x8acc0)
                                                    #2  0x00007f065af27150 _ZN4ceph6common24CephContextServiceThread5entryEv (libceph-common.so.2 + 0x279150)
                                                    #3  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #4  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 773:
                                                    #0  0x00007f065dd2b38a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f065dd2d8e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f065a5976c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f065af0c7f8 _ZN15CommonSafeTimerISt5mutexE12timer_threadEv (libceph-common.so.2 + 0x25e7f8)
                                                    #4  0x00007f065af0cf81 _ZN21CommonSafeTimerThreadISt5mutexE5entryEv (libceph-common.so.2 + 0x25ef81)
                                                    #5  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 775:
                                                    #0  0x00007f065dd2b38a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f065dd2d8e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f065a5976c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f065af0c7f8 _ZN15CommonSafeTimerISt5mutexE12timer_threadEv (libceph-common.so.2 + 0x25e7f8)
                                                    #4  0x00007f065af0cf81 _ZN21CommonSafeTimerThreadISt5mutexE5entryEv (libceph-common.so.2 + 0x25ef81)
                                                    #5  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 762:
                                                    #0  0x00007f065ddb2b7e epoll_wait (libc.so.6 + 0x10fb7e)
                                                    #1  0x00007f065b0ee618 _ZN11EpollDriver10event_waitERSt6vectorI14FiredFileEventSaIS1_EEP7timeval (libceph-common.so.2 + 0x440618)
                                                    #2  0x00007f065b0ec702 _ZN11EventCenter14process_eventsEjPNSt6chrono8durationImSt5ratioILl1ELl1000000000EEEE (libceph-common.so.2 + 0x43e702)
                                                    #3  0x00007f065b0ed2c6 _ZNSt17_Function_handlerIFvvEZN12NetworkStack10add_threadEP6WorkerEUlvE_E9_M_invokeERKSt9_Any_data (libceph-common.so.2 + 0x43f2c6)
                                                    #4  0x00007f065a59dae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #5  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 768:
                                                    #0  0x00007f065dd2b38a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f065dd2d8e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f065b890266 _ZN5boost4asio6detail9scheduler3runERNS_6system10error_codeE.constprop.0.isra.0 (librados.so.2 + 0x127266)
                                                    #3  0x00007f065b82f4e4 _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJZ17make_named_threadIZN4ceph5async15io_context_pool5startEsEUlvE_JEES_St17basic_string_viewIcSt11char_traitsIcEEOT_DpOT0_EUlSD_SG_E_S7_EEEEE6_M_runEv (librados.so.2 + 0xc64e4)
                                                    #4  0x00007f065a59dae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #5  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 774:
                                                    #0  0x00007f065dd2b38a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f065dd2d8e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f065a5976c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f065af0c7f8 _ZN15CommonSafeTimerISt5mutexE12timer_threadEv (libceph-common.so.2 + 0x25e7f8)
                                                    #4  0x00007f065af0cf81 _ZN21CommonSafeTimerThreadISt5mutexE5entryEv (libceph-common.so.2 + 0x25ef81)
                                                    #5  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 760:
                                                    #0  0x00007f065ddb2b7e epoll_wait (libc.so.6 + 0x10fb7e)
                                                    #1  0x00007f065b0ee618 _ZN11EpollDriver10event_waitERSt6vectorI14FiredFileEventSaIS1_EEP7timeval (libceph-common.so.2 + 0x440618)
                                                    #2  0x00007f065b0ec702 _ZN11EventCenter14process_eventsEjPNSt6chrono8durationImSt5ratioILl1ELl1000000000EEEE (libceph-common.so.2 + 0x43e702)
                                                    #3  0x00007f065b0ed2c6 _ZNSt17_Function_handlerIFvvEZN12NetworkStack10add_threadEP6WorkerEUlvE_E9_M_invokeERKSt9_Any_data (libceph-common.so.2 + 0x43f2c6)
                                                    #4  0x00007f065a59dae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #5  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 770:
                                                    #0  0x00007f065dd2b38a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f065dd2d8e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f065a5976c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f065b01549f _ZN13DispatchQueue5entryEv (libceph-common.so.2 + 0x36749f)
                                                    #4  0x00007f065b0a6411 _ZN13DispatchQueue14DispatchThread5entryEv (libceph-common.so.2 + 0x3f8411)
                                                    #5  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 772:
                                                    #0  0x00007f065dd2b38a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f065dd2dcc0 pthread_cond_clockwait@GLIBC_2.30 (libc.so.6 + 0x8acc0)
                                                    #2  0x00007f065af0cb23 _ZN15CommonSafeTimerISt5mutexE12timer_threadEv (libceph-common.so.2 + 0x25eb23)
                                                    #3  0x00007f065af0cf81 _ZN21CommonSafeTimerThreadISt5mutexE5entryEv (libceph-common.so.2 + 0x25ef81)
                                                    #4  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #5  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    ELF object binary architecture: AMD x86-64

● libpod-65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2.scope.d
             └─dep.conf
     Active: active (running) since Sat 2025-10-11 03:30:02 UTC; 40min ago
         IO: 7.6M read, 4.0K written
      Tasks: 6 (limit: 4096)
     Memory: 20.8M (peak: 27.3M)
        CPU: 8.438s
     CGroup: /machine.slice/libpod-65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2.scope
             └─container
               ├─152669 dumb-init --single-child -- kolla_start
               └─152684 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt

Oct 11 03:30:02 compute-0 systemd[1]: Started libcrun container.

● libpod-8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-10-11 03:37:25 UTC; 33min ago
         IO: 96.0K read, 4.0K written
      Tasks: 8 (limit: 4096)
     Memory: 19.1M (peak: 21.0M)
        CPU: 1.471s
     CGroup: /machine.slice/libpod-8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8.scope
             └─container
               ├─241114 dumb-init --single-child -- kolla_start
               └─241117 /usr/sbin/multipathd -d

Oct 11 03:37:25 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:37:25 compute-0 sudo[241118]:     root : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_set_configs
Oct 11 03:37:25 compute-0 sudo[241118]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Oct 11 03:37:25 compute-0 sudo[241118]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=0)
Oct 11 03:37:25 compute-0 sudo[241118]: pam_unix(sudo:session): session closed for user root
Oct 11 03:37:25 compute-0 sudo[241140]:     root : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_copy_cacerts
Oct 11 03:37:25 compute-0 sudo[241140]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Oct 11 03:37:25 compute-0 sudo[241140]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=0)
Oct 11 03:37:25 compute-0 sudo[241140]: pam_unix(sudo:session): session closed for user root

● libpod-dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab.scope.d
             └─dep.conf
     Active: active (running) since Sat 2025-10-11 03:31:08 UTC; 39min ago
         IO: 16.0M read, 16.9M written
      Tasks: 11 (limit: 4096)
     Memory: 437.4M (peak: 480.9M)
        CPU: 49.475s
     CGroup: /machine.slice/libpod-dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab.scope
             └─container
               ├─162242 dumb-init --single-child -- kolla_start
               ├─162245 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─162583 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─162666 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpf884qsmb/privsep.sock
               ├─267859 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmph48b70lz/privsep.sock
               └─268023 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmp8g8ifqko/privsep.sock

Oct 11 04:07:22 compute-0 podman[306181]: 2025-10-11 04:07:22.392065819 +0000 UTC m=+0.049159711 container died 981af86b39fd8e33172fc3f662f1bad99543554a0b9125e6022f16e42ea55e03 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-abadcf46-9a41-4911-85e0-fbcde2d48b79, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 04:07:22 compute-0 podman[306181]: 2025-10-11 04:07:22.442793086 +0000 UTC m=+0.099886978 container cleanup 981af86b39fd8e33172fc3f662f1bad99543554a0b9125e6022f16e42ea55e03 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-abadcf46-9a41-4911-85e0-fbcde2d48b79, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3)
Oct 11 04:07:22 compute-0 podman[306217]: 2025-10-11 04:07:22.511467809 +0000 UTC m=+0.045173419 container remove 981af86b39fd8e33172fc3f662f1bad99543554a0b9125e6022f16e42ea55e03 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-abadcf46-9a41-4911-85e0-fbcde2d48b79, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3)
Oct 11 04:07:42 compute-0 podman[306564]: 2025-10-11 04:07:42.082271752 +0000 UTC m=+0.051552107 container create d829d03ab28983488ced206da705bf8a9cc88efbc2dfc4a5cfce244aab6eb513 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-abadcf46-9a41-4911-85e0-fbcde2d48b79, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, tcib_managed=true, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Oct 11 04:07:42 compute-0 podman[306564]: 2025-10-11 04:07:42.051415406 +0000 UTC m=+0.020695771 image pull 1061e4fafe13e0b9aa1ef2c904ba4ad70c44f3e87b1d831f16c6db34937f4022 quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified
Oct 11 04:07:42 compute-0 podman[306564]: 2025-10-11 04:07:42.172077744 +0000 UTC m=+0.141358119 container init d829d03ab28983488ced206da705bf8a9cc88efbc2dfc4a5cfce244aab6eb513 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-abadcf46-9a41-4911-85e0-fbcde2d48b79, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_managed=true, org.label-schema.build-date=20251009)
Oct 11 04:07:42 compute-0 podman[306564]: 2025-10-11 04:07:42.177141692 +0000 UTC m=+0.146422047 container start d829d03ab28983488ced206da705bf8a9cc88efbc2dfc4a5cfce244aab6eb513 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-abadcf46-9a41-4911-85e0-fbcde2d48b79, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team)
Oct 11 04:08:21 compute-0 podman[307684]: 2025-10-11 04:08:21.304101595 +0000 UTC m=+0.052002817 container died d829d03ab28983488ced206da705bf8a9cc88efbc2dfc4a5cfce244aab6eb513 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-abadcf46-9a41-4911-85e0-fbcde2d48b79, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009)
Oct 11 04:08:21 compute-0 podman[307684]: 2025-10-11 04:08:21.346021427 +0000 UTC m=+0.093922649 container cleanup d829d03ab28983488ced206da705bf8a9cc88efbc2dfc4a5cfce244aab6eb513 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-abadcf46-9a41-4911-85e0-fbcde2d48b79, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, tcib_managed=true)
Oct 11 04:08:21 compute-0 podman[307737]: 2025-10-11 04:08:21.415392116 +0000 UTC m=+0.046958410 container remove d829d03ab28983488ced206da705bf8a9cc88efbc2dfc4a5cfce244aab6eb513 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-abadcf46-9a41-4911-85e0-fbcde2d48b79, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, org.label-schema.build-date=20251009, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0)

● libpod-f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-10-11 03:36:30 UTC; 34min ago
         IO: 6.7M read, 0B written
      Tasks: 2 (limit: 4096)
     Memory: 9.1M (peak: 11.2M)
        CPU: 1.013s
     CGroup: /machine.slice/libpod-f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d.scope
             └─container
               ├─230879 dumb-init --single-child -- kolla_start
               └─230882 /usr/sbin/iscsid -f

Oct 11 03:36:30 compute-0 systemd[1]: Started libcrun container.
Oct 11 03:36:30 compute-0 sudo[230884]:     root : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_set_configs
Oct 11 03:36:30 compute-0 sudo[230884]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=0)
Oct 11 03:36:30 compute-0 sudo[230884]: pam_unix(sudo:session): session closed for user root
Oct 11 03:36:30 compute-0 sudo[231068]:     root : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_copy_cacerts
Oct 11 03:36:30 compute-0 sudo[231068]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=0)
Oct 11 03:36:30 compute-0 sudo[231068]: pam_unix(sudo:session): session closed for user root

● session-1.scope - Session 1 of User zuul
     Loaded: loaded (/run/systemd/transient/session-1.scope; transient)
  Transient: yes
     Active: active (abandoned) since Sat 2025-10-11 02:42:36 UTC; 1h 28min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 17.8M (peak: 39.4M)
        CPU: 1min 18.274s
     CGroup: /user.slice/user-1000.slice/session-1.scope
             └─1267 /usr/bin/python3

Oct 11 02:45:27 np0005480824.novalocal sudo[4120]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:45:27 np0005480824.novalocal python3[4122]: ansible-ansible.legacy.stat Invoked with path=/etc/ci/env/networking-info.yml follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Oct 11 02:45:27 np0005480824.novalocal sudo[4120]: pam_unix(sudo:session): session closed for user root
Oct 11 02:45:28 np0005480824.novalocal sudo[4193]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-tamplxyrloaukgintlwfnmaxhabgvdug ; OS_CLOUD=vexxhost /usr/bin/python3'
Oct 11 02:45:28 np0005480824.novalocal sudo[4193]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 02:45:28 np0005480824.novalocal python3[4195]: ansible-ansible.legacy.copy Invoked with dest=/etc/ci/env/networking-info.yml owner=root group=root mode=0644 src=/home/zuul/.ansible/tmp/ansible-tmp-1760150727.676546-267-154807508792448/source _original_basename=tmp304vlwku follow=False checksum=2553e50585cc5f9697aa07838c8832962da24a3c backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Oct 11 02:45:28 np0005480824.novalocal sudo[4193]: pam_unix(sudo:session): session closed for user root
Oct 11 02:46:28 np0005480824.novalocal sshd-session[1066]: Received disconnect from 38.102.83.114 port 47536:11: disconnected by user
Oct 11 02:46:28 np0005480824.novalocal sshd-session[1066]: Disconnected from user zuul 38.102.83.114 port 47536
Oct 11 02:46:28 np0005480824.novalocal sshd-session[1053]: pam_unix(sshd:session): session closed for user zuul

● session-21.scope - Session 21 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-21.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-10-11 03:19:33 UTC; 51min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 1.3M)
        CPU: 124ms
     CGroup: /user.slice/user-42477.slice/session-21.scope
             ├─75886 "sshd-session: ceph-admin [priv]"
             └─75908 "sshd-session: ceph-admin"

Oct 11 03:19:33 compute-0 systemd[1]: Started Session 21 of User ceph-admin.

● session-23.scope - Session 23 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-23.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-10-11 03:19:33 UTC; 51min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.0M)
        CPU: 249ms
     CGroup: /user.slice/user-42477.slice/session-23.scope
             ├─75893 "sshd-session: ceph-admin [priv]"
             └─75909 "sshd-session: ceph-admin@notty"

Oct 11 03:19:33 compute-0 systemd[1]: Started Session 23 of User ceph-admin.
Oct 11 03:19:34 compute-0 sudo[75910]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:19:34 compute-0 sudo[75910]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:19:34 compute-0 sudo[75910]: pam_unix(sudo:session): session closed for user root
Oct 11 03:19:34 compute-0 sudo[75935]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Oct 11 03:19:34 compute-0 sudo[75935]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:19:34 compute-0 sudo[75935]: pam_unix(sudo:session): session closed for user root

● session-24.scope - Session 24 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-24.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-10-11 03:19:34 UTC; 51min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 5.8M)
        CPU: 281ms
     CGroup: /user.slice/user-42477.slice/session-24.scope
             ├─75960 "sshd-session: ceph-admin [priv]"
             └─75963 "sshd-session: ceph-admin@notty"

Oct 11 03:19:34 compute-0 systemd[1]: Started Session 24 of User ceph-admin.
Oct 11 03:19:34 compute-0 sudo[75964]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:19:34 compute-0 sudo[75964]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:19:34 compute-0 sudo[75964]: pam_unix(sudo:session): session closed for user root
Oct 11 03:19:34 compute-0 sudo[75989]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/92cfe4d4-4917-5be1-9d00-73758793a62b/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 check-host --expect-hostname compute-0
Oct 11 03:19:34 compute-0 sudo[75989]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:19:34 compute-0 sudo[75989]: pam_unix(sudo:session): session closed for user root

● session-25.scope - Session 25 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-25.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-10-11 03:19:34 UTC; 51min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.2M)
        CPU: 280ms
     CGroup: /user.slice/user-42477.slice/session-25.scope
             ├─76014 "sshd-session: ceph-admin [priv]"
             └─76017 "sshd-session: ceph-admin@notty"

Oct 11 03:19:34 compute-0 systemd[1]: Started Session 25 of User ceph-admin.
Oct 11 03:19:34 compute-0 sudo[76018]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:19:34 compute-0 sudo[76018]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:19:34 compute-0 sudo[76018]: pam_unix(sudo:session): session closed for user root
Oct 11 03:19:35 compute-0 sudo[76043]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /var/lib/ceph/92cfe4d4-4917-5be1-9d00-73758793a62b/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d
Oct 11 03:19:35 compute-0 sudo[76043]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:19:35 compute-0 sudo[76043]: pam_unix(sudo:session): session closed for user root

● session-26.scope - Session 26 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-26.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-10-11 03:19:35 UTC; 51min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.1M)
        CPU: 242ms
     CGroup: /user.slice/user-42477.slice/session-26.scope
             ├─76068 "sshd-session: ceph-admin [priv]"
             └─76071 "sshd-session: ceph-admin@notty"

Oct 11 03:19:35 compute-0 systemd[1]: Started Session 26 of User ceph-admin.
Oct 11 03:19:35 compute-0 sudo[76072]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:19:35 compute-0 sudo[76072]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:19:35 compute-0 sudo[76072]: pam_unix(sudo:session): session closed for user root
Oct 11 03:19:35 compute-0 sudo[76097]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /var/lib/ceph/92cfe4d4-4917-5be1-9d00-73758793a62b
Oct 11 03:19:35 compute-0 sudo[76097]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:19:35 compute-0 sudo[76097]: pam_unix(sudo:session): session closed for user root

● session-27.scope - Session 27 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-27.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-10-11 03:19:35 UTC; 51min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.1M)
        CPU: 269ms
     CGroup: /user.slice/user-42477.slice/session-27.scope
             ├─76122 "sshd-session: ceph-admin [priv]"
             └─76125 "sshd-session: ceph-admin@notty"

Oct 11 03:19:35 compute-0 systemd[1]: Started Session 27 of User ceph-admin.
Oct 11 03:19:35 compute-0 sudo[76126]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:19:35 compute-0 sudo[76126]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:19:35 compute-0 sudo[76126]: pam_unix(sudo:session): session closed for user root
Oct 11 03:19:35 compute-0 sudo[76151]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /tmp/cephadm-92cfe4d4-4917-5be1-9d00-73758793a62b/var/lib/ceph/92cfe4d4-4917-5be1-9d00-73758793a62b
Oct 11 03:19:35 compute-0 sudo[76151]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:19:35 compute-0 sudo[76151]: pam_unix(sudo:session): session closed for user root

● session-28.scope - Session 28 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-28.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-10-11 03:19:36 UTC; 51min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.2M)
        CPU: 242ms
     CGroup: /user.slice/user-42477.slice/session-28.scope
             ├─76176 "sshd-session: ceph-admin [priv]"
             └─76179 "sshd-session: ceph-admin@notty"

Oct 11 03:19:36 compute-0 systemd[1]: Started Session 28 of User ceph-admin.
Oct 11 03:19:36 compute-0 sudo[76180]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:19:36 compute-0 sudo[76180]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:19:36 compute-0 sudo[76180]: pam_unix(sudo:session): session closed for user root
Oct 11 03:19:36 compute-0 sudo[76205]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/touch /tmp/cephadm-92cfe4d4-4917-5be1-9d00-73758793a62b/var/lib/ceph/92cfe4d4-4917-5be1-9d00-73758793a62b/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new
Oct 11 03:19:36 compute-0 sudo[76205]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:19:36 compute-0 sudo[76205]: pam_unix(sudo:session): session closed for user root

● session-29.scope - Session 29 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-29.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-10-11 03:19:36 UTC; 51min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.2M)
        CPU: 263ms
     CGroup: /user.slice/user-42477.slice/session-29.scope
             ├─76230 "sshd-session: ceph-admin [priv]"
             └─76233 "sshd-session: ceph-admin@notty"

Oct 11 03:19:36 compute-0 systemd[1]: Started Session 29 of User ceph-admin.
Oct 11 03:19:36 compute-0 sudo[76234]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:19:36 compute-0 sudo[76234]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:19:36 compute-0 sudo[76234]: pam_unix(sudo:session): session closed for user root
Oct 11 03:19:36 compute-0 sudo[76259]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chown -R ceph-admin /tmp/cephadm-92cfe4d4-4917-5be1-9d00-73758793a62b
Oct 11 03:19:36 compute-0 sudo[76259]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:19:36 compute-0 sudo[76259]: pam_unix(sudo:session): session closed for user root

● session-30.scope - Session 30 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-30.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-10-11 03:19:36 UTC; 51min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.2M)
        CPU: 276ms
     CGroup: /user.slice/user-42477.slice/session-30.scope
             ├─76284 "sshd-session: ceph-admin [priv]"
             └─76287 "sshd-session: ceph-admin@notty"

Oct 11 03:19:36 compute-0 systemd[1]: Started Session 30 of User ceph-admin.
Oct 11 03:19:37 compute-0 sudo[76288]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:19:37 compute-0 sudo[76288]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:19:37 compute-0 sudo[76288]: pam_unix(sudo:session): session closed for user root
Oct 11 03:19:37 compute-0 sudo[76313]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chmod 644 /tmp/cephadm-92cfe4d4-4917-5be1-9d00-73758793a62b/var/lib/ceph/92cfe4d4-4917-5be1-9d00-73758793a62b/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new
Oct 11 03:19:37 compute-0 sudo[76313]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:19:37 compute-0 sudo[76313]: pam_unix(sudo:session): session closed for user root

● session-31.scope - Session 31 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-31.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-10-11 03:19:37 UTC; 51min ago
         IO: 0B read, 216.0K written
      Tasks: 2
     Memory: 1.4M (peak: 3.8M)
        CPU: 188ms
     CGroup: /user.slice/user-42477.slice/session-31.scope
             ├─76338 "sshd-session: ceph-admin [priv]"
             └─76341 "sshd-session: ceph-admin@notty"

Oct 11 03:19:37 compute-0 systemd[1]: Started Session 31 of User ceph-admin.

● session-32.scope - Session 32 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-32.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-10-11 03:19:38 UTC; 50min ago
         IO: 144.0K read, 0B written
      Tasks: 2
     Memory: 1.3M (peak: 4.2M)
        CPU: 288ms
     CGroup: /user.slice/user-42477.slice/session-32.scope
             ├─76365 "sshd-session: ceph-admin [priv]"
             └─76368 "sshd-session: ceph-admin@notty"

Oct 11 03:19:38 compute-0 systemd[1]: Started Session 32 of User ceph-admin.
Oct 11 03:19:38 compute-0 sudo[76369]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 03:19:38 compute-0 sudo[76369]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:19:38 compute-0 sudo[76369]: pam_unix(sudo:session): session closed for user root
Oct 11 03:19:38 compute-0 sudo[76394]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mv /tmp/cephadm-92cfe4d4-4917-5be1-9d00-73758793a62b/var/lib/ceph/92cfe4d4-4917-5be1-9d00-73758793a62b/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new /var/lib/ceph/92cfe4d4-4917-5be1-9d00-73758793a62b/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d
Oct 11 03:19:38 compute-0 sudo[76394]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 03:19:38 compute-0 sudo[76394]: pam_unix(sudo:session): session closed for user root

● session-33.scope - Session 33 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-33.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-10-11 03:19:38 UTC; 50min ago
         IO: 2.7M read, 170.2M written
      Tasks: 2
     Memory: 7.3M (peak: 71.5M)
        CPU: 4min 33.342s
     CGroup: /user.slice/user-42477.slice/session-33.scope
             ├─76419 "sshd-session: ceph-admin [priv]"
             └─76422 "sshd-session: ceph-admin@notty"

Oct 11 04:10:14 compute-0 podman[312731]: 2025-10-11 04:10:14.216724725 +0000 UTC m=+0.255550979 container attach 62934323c7dacd31d940284e1901d7f46aeff7a5d1c2a6cf475415517bf1f78a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_cannon, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 04:10:15 compute-0 podman[312731]: 2025-10-11 04:10:15.227679315 +0000 UTC m=+1.266505519 container died 62934323c7dacd31d940284e1901d7f46aeff7a5d1c2a6cf475415517bf1f78a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_cannon, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default)
Oct 11 04:10:15 compute-0 podman[312731]: 2025-10-11 04:10:15.406281947 +0000 UTC m=+1.445108131 container remove 62934323c7dacd31d940284e1901d7f46aeff7a5d1c2a6cf475415517bf1f78a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_cannon, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=reef, ceph=True, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 04:10:15 compute-0 sudo[312558]: pam_unix(sudo:session): session closed for user root
Oct 11 04:10:15 compute-0 sudo[312933]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 04:10:15 compute-0 sudo[312933]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 04:10:15 compute-0 sudo[312933]: pam_unix(sudo:session): session closed for user root
Oct 11 04:10:15 compute-0 sudo[312966]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 04:10:15 compute-0 sudo[312966]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 04:10:15 compute-0 sudo[312966]: pam_unix(sudo:session): session closed for user root

● session-53.scope - Session 53 of User zuul
     Loaded: loaded (/run/systemd/transient/session-53.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2025-10-11 04:09:47 UTC; 50s ago
         IO: 495.8M read, 304.2M written
      Tasks: 19
     Memory: 966.8M (peak: 1004.4M)
        CPU: 2min 3.759s
     CGroup: /user.slice/user-1000.slice/session-53.scope
             ├─308926 "sshd-session: zuul [priv]"
             ├─308929 "sshd-session: zuul@notty"
             ├─308930 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt"
             ├─308954 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             ├─315707 timeout 15s turbostat --debug sleep 10
             ├─316407 timeout 300s systemctl status --all
             ├─316408 systemctl status --all
             ├─316433 timeout 300s semanage boolean -l
             ├─316434 /usr/bin/python3 -EsI /usr/sbin/semanage boolean -l
             ├─316447 timeout --foreground 300s virsh -r nodedev-dumpxml net_vlan21_4a_8b_1f_3a_14_d4
             └─316448 virsh -r nodedev-dumpxml net_vlan21_4a_8b_1f_3a_14_d4

Oct 11 04:09:47 compute-0 systemd[1]: Started Session 53 of User zuul.
Oct 11 04:09:47 compute-0 sudo[308930]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt'
Oct 11 04:09:47 compute-0 sudo[308930]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 04:09:57 compute-0 ovs-vsctl[309342]: ovs|00001|db_ctl_base|ERR|no key "dpdk-init" in Open_vSwitch record "." column other_config

○ 65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2-d9f1cc3a1b07bd0.service - /usr/bin/podman healthcheck run 65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2
     Loaded: loaded (/run/systemd/transient/65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2-d9f1cc3a1b07bd0.service; transient)
  Transient: yes
     Active: inactive (dead) since Sat 2025-10-11 04:10:33 UTC; 4s ago
   Duration: 99ms
TriggeredBy: ● 65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2-d9f1cc3a1b07bd0.timer
    Process: 316049 ExecStart=/usr/bin/podman healthcheck run 65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2 (code=exited, status=0/SUCCESS)
   Main PID: 316049 (code=exited, status=0/SUCCESS)
        CPU: 81ms

Oct 11 04:10:33 compute-0 podman[316049]: 2025-10-11 04:10:33.020474025 +0000 UTC m=+0.080446587 container health_status 65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, io.buildah.version=1.41.3, managed_by=edpm_ansible, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, container_name=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)

○ 8b003d65c8e439e280409825aa37dacfb921ffdd0ada542Unit apparmor.service could not be found.
Unit apt-daily.service could not be found.
Unit auto-cpufreq.service could not be found.
Unit autofs.service could not be found.
78b9746654fdc0aa8-5d762caefe58de62.service - /usr/bin/podman healthcheck run 8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8
     Loaded: loaded (/run/systemd/transient/8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8-5d762caefe58de62.service; transient)
  Transient: yes
     Active: inactive (dead) since Sat 2025-10-11 04:10:25 UTC; 12s ago
   Duration: 83ms
TriggeredBy: ● 8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8-5d762caefe58de62.timer
    Process: 314596 ExecStart=/usr/bin/podman healthcheck run 8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8 (code=exited, status=0/SUCCESS)
   Main PID: 314596 (code=exited, status=0/SUCCESS)
        CPU: 68ms

Oct 11 04:10:25 compute-0 podman[314596]: 2025-10-11 04:10:25.001991533 +0000 UTC m=+0.064375184 container health_status 8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8 (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, container_name=multipathd, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true)

● auditd.service - Security Auditing Service
     Loaded: loaded (/usr/lib/systemd/system/auditd.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
       Docs: man:auditd(8)
             https://github.com/linux-audit/audit-documentation
   Main PID: 700 (auditd)
         IO: 0B read, 27.9M written
      Tasks: 4 (limit: 48573)
     Memory: 17.6M (peak: 18.0M)
        CPU: 7.075s
     CGroup: /system.slice/auditd.service
             ├─700 /sbin/auditd
             └─702 /usr/sbin/sedispatch

Oct 11 02:42:18 localhost augenrules[721]: failure 1
Oct 11 02:42:18 localhost augenrules[721]: pid 700
Oct 11 02:42:18 localhost augenrules[721]: rate_limit 0
Oct 11 02:42:18 localhost augenrules[721]: backlog_limit 8192
Oct 11 02:42:18 localhost augenrules[721]: lost 0
Oct 11 02:42:18 localhost augenrules[721]: backlog 0
Oct 11 02:42:18 localhost augenrules[721]: backlog_wait_time 60000
Oct 11 02:42:18 localhost augenrules[721]: backlog_wait_time_actual 0
Oct 11 02:42:18 localhost systemd[1]: Started Security Auditing Service.
Oct 11 03:33:39 compute-0 auditd[700]: Audit daemon rotating log files

○ auth-rpcgss-module.service - Kernel Module supporting RPCSEC_GSS
     Loaded: loaded (/usr/lib/systemd/system/auth-rpcgss-module.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-10-11 02:42:17 UTC; 1h 28min ago

● ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@crash.compute-0.service - Ceph crash.compute-0 for 92cfe4d4-4917-5be1-9d00-73758793a62b
     Loaded: loaded (/etc/systemd/system/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-10-11 03:19:55 UTC; 50min ago
   Main PID: 82390 (conmon)
         IO: 0B read, 2.3M written
      Tasks: 3 (limit: 48573)
     Memory: 12.2M (peak: 33.4M)
        CPU: 866ms
     CGroup: /system.slice/system-ceph\x2d92cfe4d4\x2d4917\x2d5be1\x2d9d00\x2d73758793a62b.slice/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@crash.compute-0.service
             ├─libpod-payload-90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476
             │ ├─82392 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ └─82394 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             └─runtime
               └─82390 /usr/bin/conmon --api-version 1 -c 90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476 -u 90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476/userdata -p /run/containers/storage/overlay-containers/90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476

Oct 11 03:19:55 compute-0 systemd[1]: Started Ceph crash.compute-0 for 92cfe4d4-4917-5be1-9d00-73758793a62b.
Oct 11 03:19:55 compute-0 ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-crash-compute-0[82390]: INFO:ceph-crash:pinging cluster to exercise our key
Oct 11 03:19:55 compute-0 ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-crash-compute-0[82390]: 2025-10-11T03:19:55.976+0000 7f08d9b01640 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory
Oct 11 03:19:55 compute-0 ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-crash-compute-0[82390]: 2025-10-11T03:19:55.976+0000 7f08d9b01640 -1 AuthRegistry(0x7f08d4066fe0) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx
Oct 11 03:19:55 compute-0 ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-crash-compute-0[82390]: 2025-10-11T03:19:55.978+0000 7f08d9b01640 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory
Oct 11 03:19:55 compute-0 ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-crash-compute-0[82390]: 2025-10-11T03:19:55.978+0000 7f08d9b01640 -1 AuthRegistry(0x7f08d9b00000) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx
Oct 11 03:19:55 compute-0 ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-crash-compute-0[82390]: 2025-10-11T03:19:55.979+0000 7f08d37fe640 -1 monclient(hunting): handle_auth_bad_method server allowed_methods [2] but i only support [1]
Oct 11 03:19:55 compute-0 ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-crash-compute-0[82390]: 2025-10-11T03:19:55.979+0000 7f08d9b01640 -1 monclient: authenticate NOTE: no keyring found; disabled cephx authentication
Oct 11 03:19:55 compute-0 ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-crash-compute-0[82390]: [errno 13] RADOS permission denied (error connecting to the cluster)
Oct 11 03:19:55 compute-0 ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-crash-compute-0[82390]: INFO:ceph-crash:monitoring path /var/lib/ceph/crash, delay 600s

● ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mds.cephfs.compute-0.uxaxgb.service - Ceph mds.cephfs.compute-0.uxaxgb for 92cfe4d4-4917-5be1-9d00-73758793a62b
     Loaded: loaded (/etc/systemd/system/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-10-11 03:21:52 UTC; 48min ago
   Main PID: 101044 (conmon)
         IO: 0B read, 715.0K written
      Tasks: 28 (limit: 48573)
     Memory: 27.0M (peak: 27.7M)
        CPU: 5.952s
     CGroup: /system.slice/system-ceph\x2d92cfe4d4\x2d4917\x2d5be1\x2d9d00\x2d73758793a62b.slice/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mds.cephfs.compute-0.uxaxgb.service
             ├─libpod-payload-53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6
             │ ├─101046 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.uxaxgb -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─101067 /usr/bin/ceph-mds -n mds.cephfs.compute-0.uxaxgb -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─101044 /usr/bin/conmon --api-version 1 -c 53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6 -u 53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6/userdata -p /run/containers/storage/overlay-containers/53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-mds-cephfs-compute-0-uxaxgb --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mds.cephfs.compute-0.uxaxgb.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6

Oct 11 04:09:59 compute-0 ceph-mds[101067]: mds.cephfs.compute-0.uxaxgb asok_command: dump loads {prefix=dump loads} (starting...)
Oct 11 04:09:59 compute-0 ceph-mds[101067]: mds.cephfs.compute-0.uxaxgb asok_command: dump tree {prefix=dump tree,root=/} (starting...)
Oct 11 04:10:00 compute-0 ceph-mds[101067]: mds.cephfs.compute-0.uxaxgb asok_command: dump_blocked_ops {prefix=dump_blocked_ops} (starting...)
Oct 11 04:10:00 compute-0 ceph-mds[101067]: mds.cephfs.compute-0.uxaxgb asok_command: dump_historic_ops {prefix=dump_historic_ops} (starting...)
Oct 11 04:10:00 compute-0 ceph-mds[101067]: mds.cephfs.compute-0.uxaxgb asok_command: dump_historic_ops_by_duration {prefix=dump_historic_ops_by_duration} (starting...)
Oct 11 04:10:00 compute-0 ceph-mds[101067]: mds.cephfs.compute-0.uxaxgb asok_command: dump_ops_in_flight {prefix=dump_ops_in_flight} (starting...)
Oct 11 04:10:00 compute-0 ceph-mds[101067]: mds.cephfs.compute-0.uxaxgb asok_command: get subtrees {prefix=get subtrees} (starting...)
Oct 11 04:10:00 compute-0 ceph-mds[101067]: mds.cephfs.compute-0.uxaxgb asok_command: ops {prefix=ops} (starting...)
Oct 11 04:10:01 compute-0 ceph-mds[101067]: mds.cephfs.compute-0.uxaxgb asok_command: session ls {prefix=session ls} (starting...)
Oct 11 04:10:01 compute-0 ceph-mds[101067]: mds.cephfs.compute-0.uxaxgb asok_command: status {prefix=status} (starting...)

● ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mgr.compute-0.pdyrua.service - Ceph mgr.compute-0.pdyrua for 92cfe4d4-4917-5be1-9d00-73758793a62b
     Loaded: loaded (/etc/systemd/system/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-10-11 03:18:40 UTC; 51min ago
   Main PID: 74613 (conmon)
         IO: 872.0K read, 2.8M written
      Tasks: 149 (limit: 48573)
     Memory: 535.0M (peak: 536.5M)
        CPU: 1min 25.465s
     CGroup: /system.slice/system-ceph\x2d92cfe4d4\x2d4917\x2d5be1\x2d9d00\x2d73758793a62b.slice/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mgr.compute-0.pdyrua.service
             ├─libpod-payload-5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba
             │ ├─74615 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.pdyrua -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─74617 /usr/bin/ceph-mgr -n mgr.compute-0.pdyrua -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─74613 /usr/bin/conmon --api-version 1 -c 5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba -u 5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba/userdata -p /run/containers/storage/overlay-containers/5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-mgr-compute-0-pdyrua --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mgr.compute-0.pdyrua.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba

Oct 11 04:10:30 compute-0 ceph-mgr[74617]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 04:10:30 compute-0 ceph-mgr[74617]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 04:10:30 compute-0 ceph-mgr[74617]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 04:10:30 compute-0 ceph-mgr[74617]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Oct 11 04:10:31 compute-0 ceph-mgr[74617]: log_channel(cluster) log [DBG] : pgmap v2018: 321 pgs: 321 active+clean; 271 MiB data, 653 MiB used, 59 GiB / 60 GiB avail
Oct 11 04:10:31 compute-0 ceph-mgr[74617]: log_channel(audit) log [DBG] : from='client.19427 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 04:10:31 compute-0 ceph-mgr[74617]: log_channel(audit) log [DBG] : from='client.19429 -' entity='client.admin' cmd=[{"prefix": "pg stat", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 04:10:33 compute-0 ceph-mgr[74617]: log_channel(cluster) log [DBG] : pgmap v2019: 321 pgs: 321 active+clean; 271 MiB data, 653 MiB used, 59 GiB / 60 GiB avail
Oct 11 04:10:35 compute-0 ceph-mgr[74617]: log_channel(cluster) log [DBG] : pgmap v2020: 321 pgs: 321 active+clean; 271 MiB data, 653 MiB used, 59 GiB / 60 GiB avail
Oct 11 04:10:37 compute-0 ceph-mgr[74617]: log_channel(cluster) log [DBG] : pgmap v2021: 321 pgs: 321 active+clean; 271 MiB data, 653 MiB used, 59 GiB / 60 GiB avail

● ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mon.compute-0.service - Ceph mon.compute-0 for 92cfe4d4-4917-5be1-9d00-73758793a62b
     Loaded: loaded (/etc/systemd/system/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-10-11 03:18:38 UTC; 51min ago
   Main PID: 74322 (conmon)
         IO: 1.5M read, 397.0M written
      Tasks: 27 (limit: 48573)
     Memory: 92.0M (peak: 107.2M)
        CPU: 46.180s
     CGroup: /system.slice/system-ceph\x2d92cfe4d4\x2d4917\x2d5be1\x2d9d00\x2d73758793a62b.slice/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mon.compute-0.service
             ├─libpod-payload-a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05
             │ ├─74324 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─74326 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             └─runtime
               └─74322 /usr/bin/conmon --api-version 1 -c a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05 -u a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05/userdata -p /run/containers/storage/overlay-containers/a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05

Oct 11 04:10:32 compute-0 ceph-mon[74326]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "status", "format": "json-pretty"} v 0) v1
Oct 11 04:10:32 compute-0 ceph-mon[74326]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2271775652' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Oct 11 04:10:32 compute-0 ceph-mon[74326]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "time-sync-status", "format": "json-pretty"} v 0) v1
Oct 11 04:10:32 compute-0 ceph-mon[74326]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/705023936' entity='client.admin' cmd=[{"prefix": "time-sync-status", "format": "json-pretty"}]: dispatch
Oct 11 04:10:33 compute-0 ceph-mon[74326]: from='client.19429 -' entity='client.admin' cmd=[{"prefix": "pg stat", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Oct 11 04:10:33 compute-0 ceph-mon[74326]: from='client.? 192.168.122.100:0/2271775652' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch
Oct 11 04:10:33 compute-0 ceph-mon[74326]: from='client.? 192.168.122.100:0/705023936' entity='client.admin' cmd=[{"prefix": "time-sync-status", "format": "json-pretty"}]: dispatch
Oct 11 04:10:34 compute-0 ceph-mon[74326]: pgmap v2019: 321 pgs: 321 active+clean; 271 MiB data, 653 MiB used, 59 GiB / 60 GiB avail
Oct 11 04:10:35 compute-0 ceph-mon[74326]: mon.compute-0@0(leader).osd e496 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 343932928 full_alloc: 348127232 kv_alloc: 318767104
Oct 11 04:10:36 compute-0 ceph-mon[74326]: pgmap v2020: 321 pgs: 321 active+clean; 271 MiB data, 653 MiB used, 59 GiB / 60 GiB avail

● ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.0.service - Ceph osd.0 for 92cfe4d4-4917-5be1-9d00-73758793a62b
     Loaded: loaded (/etc/systemd/system/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-10-11 03:20:28 UTC; 50min ago
   Main PID: 88321 (conmon)
         IO: 530.4M read, 8.5G written
      Tasks: 60 (limit: 48573)
     Memory: 939.4M (peak: 1.3G)
        CPU: 53.935s
     CGroup: /system.slice/system-ceph\x2d92cfe4d4\x2d4917\x2d5be1\x2d9d00\x2d73758793a62b.slice/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.0.service
             ├─libpod-payload-47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205
             │ ├─88323 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─88325 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─88321 /usr/bin/conmon --api-version 1 -c 47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205 -u 47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205/userdata -p /run/containers/storage/overlay-containers/47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205

Oct 11 04:10:14 compute-0 ceph-osd[88325]: monclient: tick
Oct 11 04:10:14 compute-0 ceph-osd[88325]: monclient: _check_auth_tickets
Oct 11 04:10:14 compute-0 ceph-osd[88325]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T04:09:42.605910+0000)
Oct 11 04:10:14 compute-0 ceph-osd[88325]: prioritycache tune_memory target: 4294967296 mapped: 221249536 unmapped: 27369472 heap: 248619008 old mem: 2845415832 new mem: 2845415832
Oct 11 04:10:14 compute-0 ceph-osd[88325]: monclient: tick
Oct 11 04:10:14 compute-0 ceph-osd[88325]: monclient: _check_auth_tickets
Oct 11 04:10:14 compute-0 ceph-osd[88325]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T04:09:43.606036+0000)
Oct 11 04:10:14 compute-0 ceph-osd[88325]: do_command 'log dump' '{prefix=log dump}'
Oct 11 04:10:29 compute-0 ceph-osd[88325]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 04:10:29 compute-0 ceph-osd[88325]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                           ** DB Stats **
                                           Uptime(secs): 3000.1 total, 600.0 interval
                                           Cumulative writes: 30K writes, 116K keys, 30K commit groups, 1.0 writes per commit group, ingest: 0.08 GB, 0.03 MB/s
                                           Cumulative WAL: 30K writes, 11K syncs, 2.74 writes per sync, written: 0.08 GB, 0.03 MB/s
                                           Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                           Interval writes: 5911 writes, 25K keys, 5911 commit groups, 1.0 writes per commit group, ingest: 19.16 MB, 0.03 MB/s
                                           Interval WAL: 5911 writes, 2407 syncs, 2.46 writes per sync, written: 0.02 GB, 0.03 MB/s
                                           Interval stall: 00:00:0.000 H:M:S, 0.0 percent

● ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.1.service - Ceph osd.1 for 92cfe4d4-4917-5be1-9d00-73758793a62b
     Loaded: loaded (/etc/systemd/system/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-10-11 03:20:33 UTC; 50min ago
   Main PID: 89397 (conmon)
         IO: 578.5M read, 8.7G written
      Tasks: 60 (limit: 48573)
     Memory: 1.0G (peak: 1.4G)
        CPU: 54.140s
     CGroup: /system.slice/system-ceph\x2d92cfe4d4\x2d4917\x2d5be1\x2d9d00\x2d73758793a62b.slice/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.1.service
             ├─libpod-payload-159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2
             │ ├─89399 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─89401 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─89397 /usr/bin/conmon --api-version 1 -c 159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2 -u 159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2/userdata -p /run/containers/storage/overlay-containers/159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2

Oct 11 04:10:09 compute-0 ceph-osd[89401]: monclient: _check_auth_tickets
Oct 11 04:10:09 compute-0 ceph-osd[89401]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T04:09:38.120404+0000)
Oct 11 04:10:09 compute-0 ceph-osd[89401]: prioritycache tune_memory target: 4294967296 mapped: 221691904 unmapped: 65642496 heap: 287334400 old mem: 2845415832 new mem: 2845415832
Oct 11 04:10:09 compute-0 ceph-osd[89401]: monclient: tick
Oct 11 04:10:09 compute-0 ceph-osd[89401]: monclient: _check_auth_tickets
Oct 11 04:10:09 compute-0 ceph-osd[89401]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T04:09:39.120550+0000)
Oct 11 04:10:09 compute-0 ceph-osd[89401]: prioritycache tune_memory target: 4294967296 mapped: 221224960 unmapped: 66109440 heap: 287334400 old mem: 2845415832 new mem: 2845415832
Oct 11 04:10:09 compute-0 ceph-osd[89401]: do_command 'log dump' '{prefix=log dump}'
Oct 11 04:10:35 compute-0 ceph-osd[89401]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
Oct 11 04:10:35 compute-0 ceph-osd[89401]: rocksdb: [db/db_impl/db_impl.cc:1111] 
                                           ** DB Stats **
                                           Uptime(secs): 3000.1 total, 600.0 interval
                                           Cumulative writes: 29K writes, 116K keys, 29K commit groups, 1.0 writes per commit group, ingest: 0.08 GB, 0.03 MB/s
                                           Cumulative WAL: 29K writes, 10K syncs, 2.77 writes per sync, written: 0.08 GB, 0.03 MB/s
                                           Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
                                           Interval writes: 4587 writes, 21K keys, 4587 commit groups, 1.0 writes per commit group, ingest: 14.58 MB, 0.02 MB/s
                                           Interval WAL: 4587 writes, 1912 syncs, 2.40 writes per sync, written: 0.01 GB, 0.02 MB/s
                                           Interval stall: 00:00:0.000 H:M:S, 0.0 percent

● ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.2.service - Ceph osd.2 for 92cfe4d4-4917-5be1-9d00-73758793a62b
     Loaded: loaded (/etc/systemd/system/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-10-11 03:20:39 UTC; 49min ago
   Main PID: 90439 (conmon)
         IO: 494.7M read, 7.8G written
      Tasks: 60 (limit: 48573)
     Memory: 807.1M (peak: 1.0G)
        CPU: 47.101s
     CGroup: /system.slice/system-ceph\x2d92cfe4d4\x2d4917\x2d5be1\x2d9d00\x2d73758793a62b.slice/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.2.service
             ├─libpod-payload-1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784
             │ ├─90441 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─90443 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─90439 /usr/bin/conmon --api-version 1 -c 1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784 -u 1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784/userdata -p /run/containers/storage/overlay-containers/1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784

Oct 11 04:10:05 compute-0 ceph-osd[90443]: osd.2 496 heartbeat osd_stat(store_statfs(0x4f3aa8000/0x0/0x4ffc00000, data 0x3aa262c/0x3d46000, compress 0x0/0x0/0x0, omap 0x63a, meta 0x840f9c6), peers [0,1] op hist [])
Oct 11 04:10:05 compute-0 ceph-osd[90443]: prioritycache tune_memory target: 4294967296 mapped: 189079552 unmapped: 36134912 heap: 225214464 old mem: 2845415832 new mem: 2845415832
Oct 11 04:10:05 compute-0 ceph-osd[90443]: monclient: tick
Oct 11 04:10:05 compute-0 ceph-osd[90443]: monclient: _check_auth_tickets
Oct 11 04:10:05 compute-0 ceph-osd[90443]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T04:09:34.319252+0000)
Oct 11 04:10:05 compute-0 ceph-osd[90443]: prioritycache tune_memory target: 4294967296 mapped: 189128704 unmapped: 36085760 heap: 225214464 old mem: 2845415832 new mem: 2845415832
Oct 11 04:10:05 compute-0 ceph-osd[90443]: monclient: tick
Oct 11 04:10:05 compute-0 ceph-osd[90443]: monclient: _check_auth_tickets
Oct 11 04:10:05 compute-0 ceph-osd[90443]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-10-11T04:09:35.319415+0000)
Oct 11 04:10:05 compute-0 ceph-osd[90443]: do_command 'log dump' '{prefix=log dump}'

● ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@rgw.rgw.compute-0.bqunnq.service - Ceph rgw.rgw.compute-0.bqunnq for 92cfe4d4-4917-5be1-9d00-73758793a62b
     Loaded: loaded (/etc/systemd/system/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-10-11 03:21:50 UTC; 48min ago
   Main PID: 100585 (conmon)
         IO: 1.4M read, 646.5K written
      Tasks: 605 (limit: 48573)
     Memory: 103.0M (peak: 103.9M)
        CPU: 13.314s
     CGroup: /system.slice/system-ceph\x2d92cfe4d4\x2d4917\x2d5be1\x2d9d00\x2d73758793a62b.slice/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@rgw.rgw.compute-0.bqunnq.service
             ├─libpod-payload-25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60
             │ ├─100587 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.bqunnq -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─100589 /usr/bin/radosgw -n client.rgw.rgw.compute-0.bqunnq -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─100585 /usr/bin/conmon --api-version 1 -c 25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60 -u 25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60/userdata -p /run/containers/storage/overlay-containers/25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-rgw-rgw-compute-0-bqunnq --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@rgw.rgw.compute-0.bqunnq.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60

Oct 11 03:21:51 compute-0 radosgw[100589]: framework conf key: endpoint, val: 192.168.122.100:8082
Oct 11 03:21:51 compute-0 radosgw[100589]: init_numa not setting numa affinity
Oct 11 03:22:00 compute-0 radosgw[100589]: LDAP not started since no server URIs were provided in the configuration.
Oct 11 03:22:00 compute-0 ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-rgw-rgw-compute-0-bqunnq[100585]: 2025-10-11T03:22:00.279+0000 7f1d9498f940 -1 LDAP not started since no server URIs were provided in the configuration.
Oct 11 03:22:00 compute-0 radosgw[100589]: framework: beast
Oct 11 03:22:00 compute-0 radosgw[100589]: framework conf key: ssl_certificate, val: config://rgw/cert/$realm/$zone.crt
Oct 11 03:22:00 compute-0 radosgw[100589]: framework conf key: ssl_private_key, val: config://rgw/cert/$realm/$zone.key
Oct 11 03:22:00 compute-0 radosgw[100589]: starting handler: beast
Oct 11 03:22:00 compute-0 radosgw[100589]: set uid:gid to 167:167 (ceph:ceph)
Oct 11 03:22:00 compute-0 radosgw[100589]: mgrc service_daemon_register rgw.14273 metadata {arch=x86_64,ceph_release=reef,ceph_version=ceph version 18.2.7 (6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad) reef (stable),ceph_version_short=18.2.7,container_hostname=compute-0,container_image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0,cpu=AMD EPYC-Rome Processor,distro=centos,distro_description=CentOS Stream 9,distro_version=9,frontend_config#0=beast endpoint=192.168.122.100:8082,frontend_type#0=beast,hostname=compute-0,id=rgw.compute-0.bqunnq,kernel_description=#1 SMP PREEMPT_DYNAMIC Tue Sep 30 07:37:35 UTC 2025,kernel_version=5.14.0-621.el9.x86_64,mem_swap_kb=1048572,mem_total_kb=7864356,num_handles=1,os=Linux,pid=2,realm_id=,realm_name=,zone_id=d1b20eea-863a-4642-a5b9-414ad62d01d2,zone_name=default,zonegroup_id=2b23d063-6bef-4b29-8e72-4dac4ad3045b,zonegroup_name=default}

● ceph-osd-losetup-0.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-0.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-10-11 03:17:50 UTC; 52min ago
   Main PID: 71754 (code=exited, status=0/SUCCESS)
        CPU: 27ms

Oct 11 03:17:50 compute-0 systemd[1]: Starting Ceph OSD losetup...
Oct 11 03:17:50 compute-0 bash[71755]: /dev/loop3: [64513]:4427970 (/var/lib/ceph-osd-0.img)
Oct 11 03:17:50 compute-0 systemd[1]: Finished Ceph OSD losetup.

● ceph-osd-losetup-1.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-1.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-10-11 03:17:55 UTC; 52min ago
   Main PID: 72123 (code=exited, status=0/SUCCESS)
        CPU: 18ms

Oct 11 03:17:55 compute-0 systemd[1]: Starting Ceph OSD losetup...
Oct 11 03:17:55 compute-0 bash[72124]: /dev/loop4: [64513]:4427971 (/var/lib/ceph-osd-1.img)
Oct 11 03:17:55 compute-0 systemd[1]: Finished Ceph OSD losetup.

● ceph-osd-losetup-2.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-2.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-10-11 03:18:00 UTC; 52min ago
   Main PID: 72493 (code=exited, status=0/SUCCESS)
        CPU: 27ms

Oct 11 03:18:00 compute-0 systemd[1]: Starting Ceph OSD losetup...
Oct 11 03:18:00 compute-0 bash[72494]: /dev/loop5: [64513]:4427972 (/var/lib/ceph-osd-2.img)
Oct 11 03:18:00 compute-0 systemd[1]: Finished Ceph OSD losetup.

● chronyd.service - NTP client/server
     Loaded: loaded (/usr/lib/systemd/system/chronyd.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-10-11 03:13:12 UTC; 57min ago
       Docs: man:chronyd(8)
             man:chrony.conf(5)
   Main PID: 54376 (chronyd)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48573)
     Memory: 1004.0K (peak: 1.7M)
        CPU: 107ms
     CGroup: /system.slice/chronyd.service
             └─54376 /usr/sbin/chronyd -F 2

Oct 11 03:13:12 compute-0 systemd[1]: Starting NTP client/server...
Oct 11 03:13:12 compute-0 chronyd[54376]: chronyd version 4.6.1 starting (+CMDMON +NTP +REFCLOCK +RTC +PRIVDROP +SCFILTER +SIGND +ASYNCDNS +NTS +SECHASH +IPV6 +DEBUG)
Oct 11 03:13:12 compute-0 chronyd[54376]: Frequency -22.670 +/- 0.155 ppm read from /var/lib/chrony/drift
Oct 11 03:13:12 compute-0 chronyd[54376]: Loaded seccomp filter (level 2)
Oct 11 03:13:12 compute-0 systemd[1]: Started NTP client/server.
Oct 11 03:15:22 compute-0 chronyd[54376]: Selected source 149.56.19.163 (pool.ntp.org)

● cloud-config.service - Cloud-init: Config Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-10-11 02:42:22 UTC; 1h 28min ago
   Main PID: 1002 (code=exited, status=0/SUCCESS)
        CPU: 402ms

Oct 11 02:42:21 np0005480824.novalocal systemd[1]: Starting Cloud-init: Config Stage...
Oct 11 02:42:22 np0005480824.novalocal cloud-init[1030]: Cloud-init v. 24.4-7.el9 running 'modules:config' at Sat, 11 Oct 2025 02:42:22 +0000. Up 9.93 seconds.
Oct 11 02:42:22 np0005480824.novalocal systemd[1]: Finished Cloud-init: Config Stage.

● cloud-final.service - Cloud-init: Final Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-final.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-10-11 02:42:22 UTC; 1h 28min ago
   Main PID: 1035 (code=exited, status=0/SUCCESS)
        CPU: 609ms

Oct 11 02:42:22 np0005480824.novalocal systemd[1]: Starting Cloud-init: Final Stage...
Oct 11 02:42:22 np0005480824.novalocal cloud-init[1038]: Cloud-init v. 24.4-7.el9 running 'modules:final' at Sat, 11 Oct 2025 02:42:22 +0000. Up 10.36 seconds.
Oct 11 02:42:22 np0005480824.novalocal cloud-init[1040]: #############################################################
Oct 11 02:42:22 np0005480824.novalocal cloud-init[1041]: -----BEGIN SSH HOST KEY FINGERPRINTS-----
Oct 11 02:42:22 np0005480824.novalocal cloud-init[1045]: 256 SHA256:6ldOyiwpPVjGT1DeqXhNrz6iHqETStj+8TQKcNO/BW4 root@np0005480824.novalocal (ED25519)
Oct 11 02:42:22 np0005480824.novalocal cloud-init[1048]: -----END SSH HOST KEY FINGERPRINTS-----
Oct 11 02:42:22 np0005480824.novalocal cloud-init[1049]: #############################################################
Oct 11 02:42:22 np0005480824.novalocal cloud-init[1038]: Cloud-init v. 24.4-7.el9 finished at Sat, 11 Oct 2025 02:42:22 +0000. Datasource DataSourceConfigDrive [net,ver=2][source=/dev/sr0].  Up 10.60 seconds
Oct 11 02:42:22 np0005480824.novalocal systemd[1]: Finished Cloud-init: Final Stage.

● cloud-init-local.service - Cloud-init: Local Stage (pre-network)
     Loaded: loaded (/usr/lib/systemd/system/cloud-init-local.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-10-11 02:42:19 UTC; 1h 28min ago
   Main PID: 774 (code=exited, status=0/SUCCESS)
        CPU: 703ms

Oct 11 02:42:18 localhost systemd[1]: Starting Cloud-init: Local Stage (pre-network)...
Oct 11 02:42:19 localhost cloud-init[837]: Cloud-init v. 24.4-7.el9 running 'init-local' at Sat, 11 Oct 2025 02:42:19 +0000. Up 7.02 seconds.
Oct 11 02:42:19 np0005480824.novalocal systemd[1]: Finished Cloud-init: Local Stage (pre-network).

● cloud-init.service - Cloud-init: Network Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-10-11 02:42:21 UTC; 1h 28min ago
   Main PID: 884 (code=exited, status=0/SUCCESS)
        CPU: 1.218s

Oct 11 02:42:21 np0005480824.novalocal cloud-init[920]: |        .        |
Oct 11 02:42:21 np0005480824.novalocal cloud-init[920]: |       o . .     |
Oct 11 02:42:21 np0005480824.novalocal cloud-init[920]: | o .  . . +      |
Oct 11 02:42:21 np0005480824.novalocal cloud-init[920]: |o = o.ooS+ .     |
Oct 11 02:42:21 np0005480824.novalocal cloud-init[920]: | = o ==++ + .    |
Oct 11 02:42:21 np0005480824.novalocal cloud-init[920]: |  + +=EB.= .     |
Oct 11 02:42:21 np0005480824.novalocal cloud-init[920]: |   ooO==B +      |
Oct 11 02:42:21 np0005480824.novalocal cloud-init[920]: |    oo*= o..     |
Oct 11 02:42:21 np0005480824.novalocal cloud-init[920]: +----[SHA256]-----+
Oct 11 02:42:21 np0005480824.novalocal systemd[1]: Finished Cloud-init: Network Stage.

○ cpupower.service - Configure CPU power related settings
     Loaded: loaded (/usr/lib/systemd/system/cpupower.service; disabled; preset: disabled)
     Active: inactive (dead)

● crond.service - Command Scheduler
     Loaded: loaded (/usr/lib/systemd/system/crond.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-10-11 02:42:21 UTC; 1h 28min ago
   Main PID: 1007 (crond)
         IO: 176.0K read, 8.0K written
      Tasks: 2 (limit: 48573)
     Memory: 1.6M (peak: 4.9M)
        CPU: 211ms
     CGroup: /system.slice/crond.service
             ├─ 1007 /usr/sbin/crond -n
             └─27476 /usr/sbin/anacron -s

Oct 11 03:01:02 compute-0 run-parts[27478]: (/etc/cron.hourly) finished 0anacron
Oct 11 03:01:02 compute-0 CROND[27464]: (root) CMDEND (run-parts /etc/cron.hourly)
Oct 11 03:36:02 compute-0 anacron[27476]: Job `cron.daily' started
Oct 11 03:36:02 compute-0 anacron[27476]: Job `cron.daily' terminated
Oct 11 03:56:02 compute-0 anacron[27476]: Job `cron.weekly' started
Oct 11 03:56:02 compute-0 anacron[27476]: Job `cron.weekly' terminated
Oct 11 04:01:01 compute-0 CROND[297125]: (root) CMD (run-parts /etc/cron.hourly)
Oct 11 04:01:01 compute-0 run-parts[297128]: (/etc/cron.hourly) starting 0anacron
Oct 11 04:01:01 compute-0 run-parts[297134]: (/etc/cron.hourly) finished 0anacron
Oct 11 04:01:01 compute-0 CROND[297124]: (root) CMDEND (run-parts /etc/cron.hourly)

● dbus-broker.service - D-Bus System Message Bus
     Loaded: loaded (/usr/lib/systemd/system/dbus-broker.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
TriggeredBy: ● dbus.socket
       Docs: man:dbus-broker-launch(1)
   Main PID: 738 (dbus-broker-lau)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48573)
     Memory: 2.9M (peak: 3.6M)
        CPU: 8.094s
     CGroup: /system.slice/dbus-broker.service
             ├─738 /usr/bin/dbus-broker-launch --scope system --audit
             └─770 dbus-broker --log 4 --controller 9 --machine-id a1727ec20198bc6caf436a6e13c4ff5e --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit

Oct 11 03:10:37 compute-0 dbus-broker-launch[738]: Noticed file-system modification, trigger reload.
Oct 11 03:11:22 compute-0 dbus-broker-launch[770]: avc:  op=load_policy lsm=selinux seqno=9 res=1
Oct 11 03:11:32 compute-0 dbus-broker-launch[770]: avc:  op=load_policy lsm=selinux seqno=10 res=1
Oct 11 03:29:02 compute-0 dbus-broker-launch[770]: avc:  op=load_policy lsm=selinux seqno=11 res=1
Oct 11 03:32:19 compute-0 dbus-broker-launch[770]: avc:  op=load_policy lsm=selinux seqno=12 res=1
Oct 11 03:32:35 compute-0 dbus-broker-launch[770]: avc:  op=load_policy lsm=selinux seqno=13 res=1
Oct 11 03:33:24 compute-0 dbus-broker-launch[738]: Noticed file-system modification, trigger reload.
Oct 11 03:33:24 compute-0 dbus-broker-launch[770]: avc:  op=load_policy lsm=selinux seqno=14 res=1
Oct 11 03:33:24 compute-0 dbus-broker-launch[738]: Noticed file-system modification, trigger reload.
Oct 11 03:34:47 compute-0 dbus-broker-launch[770]: avc:  op=load_policy lsm=selinux seqno=15 res=1

○ dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab-6e6beab5453a19ab.service - /usr/bin/podman healthcheck run dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab
     Loaded: loaded (/run/systemd/transient/dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab-6e6beab5453a19ab.service; transient)
  Transient: yes
     Active: inactive (dead) since Sat 2025-10-11 04:10:08 UTC; 29s ago
   Duration: 109ms
TriggeredBy: ● dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab-6e6beab5453a19ab.timer
    Process: 311113 ExecStart=/usr/bin/podman healthcheck run dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab (code=exited, status=0/SUCCESS)
   Main PID: 311113 (code=exited, status=0/SUCCESS)
        CPU: 77ms

Oct 11 04:10:08 compute-0 podman[311113]: 2025-10-11 04:10:08.030419551 +0000 UTC m=+0.091680777 container health_status dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, container_name=ovn_metadata_agent, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_id=ovn_metadata_agent, org.label-schema.vendor=CentOS, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']})

Unit display-manager.service could not be found.

○ dm-event.service - Device-mapper event daemon
     Loaded: loaded (/usr/lib/systemd/system/dm-event.service; static)
     Active: inactive (dead)
TriggeredBy: ● dm-event.socket
       Docs: man:dmeventd(8)

○ dnf-makecache.service - dnf makecache
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.service; static)
     Active: inactive (dead) since Sat 2025-10-11 03:06:20 UTC; 1h 4min ago
TriggeredBy: ● dnf-makecache.timer
   Main PID: 27481 (code=exited, status=0/SUCCESS)
        CPU: 23.417s

Oct 11 03:05:58 compute-0 dnf[27481]: NFV SIG OpenvSwitch                              25 MB/s | 449 kB     00:00
Oct 11 03:05:58 compute-0 dnf[27481]: repo-setup-centos-appstream                     130 MB/s |  25 MB     00:00
Oct 11 03:06:04 compute-0 dnf[27481]: repo-setup-centos-baseos                         77 MB/s | 8.8 MB     00:00
Oct 11 03:06:05 compute-0 dnf[27481]: repo-setup-centos-highavailability               31 MB/s | 744 kB     00:00
Oct 11 03:06:05 compute-0 dnf[27481]: repo-setup-centos-powertools                     91 MB/s | 7.2 MB     00:00
Oct 11 03:06:08 compute-0 dnf[27481]: Extra Packages for Enterprise Linux 9 - x86_64   18 MB/s |  20 MB     00:01
Oct 11 03:06:20 compute-0 dnf[27481]: Metadata cache created.
Oct 11 03:06:20 compute-0 systemd[1]: dnf-makecache.service: Deactivated successfully.
Oct 11 03:06:20 compute-0 systemd[1]: Finished dnf makecache.
Oct 11 03:06:20 compute-0 systemd[1]: dnf-makecache.service: Consumed 23.417s CPU time.

○ dracut-cmdline.service - dracut cmdline hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-cmdline.service; static)
     Active: inactive (dead) since Sat 2025-10-11 02:42:16 UTC; 1h 28min ago
   Duration: 2.000s
       Docs: man:dracut-cmdline.service(8)
             man:dracut.bootup(7)
   Main PID: 324 (code=exited, status=0/SUCCESS)
        CPU: 168ms

Oct 11 02:42:14 localhost systemd[1]: Starting dracut cmdline hook...
Oct 11 02:42:14 localhost dracut-cmdline[324]: dracut-9 dracut-057-102.git20250818.el9
Oct 11 02:42:14 localhost dracut-cmdline[324]: Using kernel command line parameters:    BOOT_IMAGE=(hd0,msdos1)/boot/vmlinuz-5.14.0-621.el9.x86_64 root=UUID=9839e2e1-98a2-4594-b609-79d514deb0a3 ro console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=1G-2G:192M,2G-64G:256M,64G-:512M
Oct 11 02:42:14 localhost systemd[1]: Finished dracut cmdline hook.
Oct 11 02:42:16 localhost systemd[1]: dracut-cmdline.service: Deactivated successfully.
Oct 11 02:42:16 localhost systemd[1]: Stopped dracut cmdline hook.

○ dracut-initqueue.service - dracut initqueue hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-initqueue.service; static)
     Active: inactive (dead) since Sat 2025-10-11 02:42:16 UTC; 1h 28min ago
   Duration: 926ms
       Docs: man:dracut-initqueue.service(8)
             man:dracut.bootup(7)
   Main PID: 501 (code=exited, status=0/SUCCESS)
        CPU: 51ms

Oct 11 02:42:15 localhost systemd[1]: Starting dracut initqueue hook...
Oct 11 02:42:15 localhost systemd[1]: Finished dracut initqueue hook.
Oct 11 02:42:16 localhost systemd[1]: dracut-initqueue.service: Deactivated successfully.
Oct 11 02:42:16 localhost systemd[1]: Stopped dracut initqueue hook.

○ dracut-mount.service - dracut mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-mount.service; static)
     Active: inactive (dead) since Sat 2025-10-11 02:42:16 UTC; 1h 28min ago
   Duration: 206ms
       Docs: man:dracut-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 565 (code=exited, status=0/SUCCESS)
        CPU: 23ms

Oct 11 02:42:16 localhost systemd[1]: Starting dracut mount hook...
Oct 11 02:42:16 localhost systemd[1]: Finished dracut mount hook.
Oct 11 02:42:16 localhost systemd[1]: dracut-mount.service: Deactivated successfully.
Oct 11 02:42:16 localhost systemd[1]: Stopped dracut mount hook.

○ dracut-pre-mount.service - dracut pre-mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-mount.service; static)
     Active: inactive (dead) since Sat 2025-10-11 02:42:16 UTC; 1h 28min ago
   Duration: 881ms
       Docs: man:dracut-pre-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 544 (code=exited, status=0/SUCCESS)
        CPU: 15ms

Oct 11 02:42:15 localhost systemd[1]: Starting dracut pre-mount hook...
Oct 11 02:42:15 localhost systemd[1]: Finished dracut pre-mount hook.
Oct 11 02:42:16 localhost systemd[1]: dracut-pre-mount.service: Deactivated successfully.
Oct 11 02:42:16 localhost systemd[1]: Stopped dracut pre-mount hook.

○ dracut-pre-pivot.service - dracut pre-pivot and cleanup hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-pivot.service; static)
     Active: inactive (dead) since Sat 2025-10-11 02:42:16 UTC; 1h 28min ago
   Duration: 43ms
       Docs: man:dracut-pre-pivot.service(8)
             man:dracut.bootup(7)
   Main PID: 570 (code=exited, status=0/SUCCESS)
        CPU: 125ms

Oct 11 02:42:16 localhost systemd[1]: Starting dracut pre-pivot and cleanup hook...
Oct 11 02:42:16 localhost systemd[1]: Finished dracut pre-pivot and cleanup hook.
Oct 11 02:42:16 localhost systemd[1]: dracut-pre-pivot.service: Deactivated successfully.
Oct 11 02:42:16 localhost systemd[1]: Stopped dracut pre-pivot and cleanup hook.

○ dracut-pre-trigger.service - dracut pre-trigger hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-trigger.service; static)
     Active: inactive (dead) since Sat 2025-10-11 02:42:16 UTC; 1h 28min ago
   Duration: 1.507s
       Docs: man:dracut-pre-trigger.service(8)
             man:dracut.bootup(7)
   Main PID: 465 (code=exited, status=0/SUCCESS)
        CPU: 30ms

Oct 11 02:42:15 localhost systemd[1]: Starting dracut pre-trigger hook...
Oct 11 02:42:15 localhost systemd[1]: Finished dracut pre-trigger hook.
Oct 11 02:42:16 localhost systemd[1]: dracut-pre-trigger.service: Deactivated successfully.
Oct 11 02:42:16 localhost systemd[1]: Stopped dracut pre-trigger hook.

○ dracut-pre-udev.service - dracut pre-udev hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-udev.service; static)
     Active: inactive (dead) since Sat 2025-10-11 02:42:16 UTC; 1h 28min ago
   Duration: 1.637s
       Docs: man:dracut-pre-udev.service(8)
             man:dracut.bootup(7)
   Main PID: 414 (code=exited, status=0/SUCCESS)
        CPU: 363ms

Oct 11 02:42:14 localhost systemd[1]: Starting dracut pre-udev hook...
Oct 11 02:42:15 localhost rpc.statd[441]: Version 2.5.4 starting
Oct 11 02:42:15 localhost rpc.statd[441]: Initializing NSM state
Oct 11 02:42:15 localhost rpc.idmapd[446]: Setting log level to 0
Oct 11 02:42:15 localhost systemd[1]: Finished dracut pre-udev hook.
Oct 11 02:42:16 localhost rpc.idmapd[446]: exiting on signal 15
Oct 11 02:42:16 localhost systemd[1]: dracut-pre-udev.service: Deactivated successfully.
Oct 11 02:42:16 localhost systemd[1]: Stopped dracut pre-udev hook.

○ dracut-shutdown-onfailure.service - Service executing upon dracut-shutdown failure to perform cleanup
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown-onfailure.service; static)
     Active: inactive (dead)
       Docs: man:dracut-shutdown.service(8)

● dracut-shutdown.service - Restore /run/initramfs on shutdown
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown.service; static)
     Active: active (exited) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
       Docs: man:dracut-shutdown.service(8)
   Main PID: 775 (code=exited, status=0/SUCCESS)
        CPU: 4ms

Oct 11 02:42:18 localhost systemd[1]: Starting Restore /run/initramfs on shutdown...
Oct 11 02:42:18 localhost systemd[1]: Finished Restore /run/initramfs on shutdown.

● edpm-container-shutdown.service - EDPM Container Shutdown
     Loaded: loaded (/etc/systemd/system/edpm-container-shutdown.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2025-10-11 03:14:52 UTC; 55min ago
       Docs: https://github.com/openstack-k8s-operators/docs
   Main PID: 57373 (code=exited, status=0/SUCCESS)
        CPU: 2ms

Oct 11 03:14:52 compute-0 systemd[1]: Starting EDPM Container Shutdown...
Oct 11 03:14:52 compute-0 systemd[1]: Finished EDPM Container Shutdown.

● edpm_iscsid.service - iscsid container
     Loaded: loaded (/etc/systemd/system/edpm_iscsid.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-10-11 03:36:30 UTC; 34min ago
   Main PID: 230877 (conmon)
         IO: 0B read, 130.5K written
      Tasks: 1 (limit: 48573)
     Memory: 676.0K (peak: 17.9M)
        CPU: 168ms
     CGroup: /system.slice/edpm_iscsid.service
             └─230877 /usr/bin/conmon --api-version 1 -c f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d -u f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d/userdata -p /run/containers/storage/overlay-containers/f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d/userdata/pidfile -n iscsid --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d/userdata/oci-log --conmon-pidfile /run/iscsid.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only 
--exit-command-arg f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d

Oct 11 03:36:30 compute-0 iscsid[230877]: + CMD='/usr/sbin/iscsid -f'
Oct 11 03:36:30 compute-0 iscsid[230877]: + ARGS=
Oct 11 03:36:30 compute-0 iscsid[230877]: + sudo kolla_copy_cacerts
Oct 11 03:36:30 compute-0 iscsid[230877]: + [[ ! -n '' ]]
Oct 11 03:36:30 compute-0 iscsid[230877]: + . kolla_extend_start
Oct 11 03:36:30 compute-0 iscsid[230877]: ++ [[ ! -f /etc/iscsi/initiatorname.iscsi ]]
Oct 11 03:36:30 compute-0 iscsid[230877]: + echo 'Running command: '\''/usr/sbin/iscsid -f'\'''
Oct 11 03:36:30 compute-0 iscsid[230877]: Running command: '/usr/sbin/iscsid -f'
Oct 11 03:36:30 compute-0 iscsid[230877]: + umask 0022
Oct 11 03:36:30 compute-0 iscsid[230877]: + exec /usr/sbin/iscsid -f

○ edpm_libvirt_guests.service - Suspend libvirt Guests in edpm
     Loaded: loaded (/etc/systemd/system/edpm_libvirt_guests.service; enabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:libvirtd(8)
             https://libvirt.org

● edpm_multipathd.service - multipathd container
     Loaded: loaded (/etc/systemd/system/edpm_multipathd.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-10-11 03:37:25 UTC; 33min ago
   Main PID: 241112 (conmon)
         IO: 0B read, 115.5K written
      Tasks: 1 (limit: 48573)
     Memory: 664.0K (peak: 20.2M)
        CPU: 156ms
     CGroup: /system.slice/edpm_multipathd.service
             └─241112 /usr/bin/conmon --api-version 1 -c 8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8 -u 8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8/userdata -p /run/containers/storage/overlay-containers/8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8/userdata/pidfile -n multipathd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8/userdata/oci-log --conmon-pidfile /run/multipathd.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only 
--exit-command-arg 8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8

Oct 11 03:37:25 compute-0 multipathd[241112]: + sudo kolla_copy_cacerts
Oct 11 03:37:25 compute-0 multipathd[241112]: + [[ ! -n '' ]]
Oct 11 03:37:25 compute-0 multipathd[241112]: + . kolla_extend_start
Oct 11 03:37:25 compute-0 multipathd[241112]: Running command: '/usr/sbin/multipathd -d'
Oct 11 03:37:25 compute-0 multipathd[241112]: + echo 'Running command: '\''/usr/sbin/multipathd -d'\'''
Oct 11 03:37:25 compute-0 multipathd[241112]: + umask 0022
Oct 11 03:37:25 compute-0 multipathd[241112]: + exec /usr/sbin/multipathd -d
Oct 11 03:37:25 compute-0 multipathd[241112]: 3313.000685 | --------start up--------
Oct 11 03:37:25 compute-0 multipathd[241112]: 3313.000704 | read /etc/multipath.conf
Oct 11 03:37:25 compute-0 multipathd[241112]: 3313.007056 | path checkers start up

● edpm_nova_compute.service - nova_compute container
     Loaded: loaded (/etc/systemd/system/edpm_nova_compute.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-10-11 03:39:20 UTC; 31min ago
    Process: 260074 ExecStart=/usr/bin/podman start nova_compute (code=exited, status=0/SUCCESS)
   Main PID: 260089 (conmon)
         IO: 0B read, 93.5K written
      Tasks: 1 (limit: 48573)
     Memory: 680.0K (peak: 16.8M)
        CPU: 1.329s
     CGroup: /system.slice/edpm_nova_compute.service
             └─260089 /usr/bin/conmon --api-version 1 -c 26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1 -u 26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1/userdata -p /run/containers/storage/overlay-containers/26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1

Oct 11 04:10:16 compute-0 nova_compute[260089]: 2025-10-11 04:10:16.377 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Oct 11 04:10:17 compute-0 nova_compute[260089]: 2025-10-11 04:10:17.764 2 DEBUG oslo_service.periodic_task [None req-d79b6b22-ab5a-41f8-9631-0f44ba1473d0 - - - - - -] Running periodic task ComputeManager._cleanup_running_deleted_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Oct 11 04:10:20 compute-0 nova_compute[260089]: 2025-10-11 04:10:20.690 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Oct 11 04:10:21 compute-0 nova_compute[260089]: 2025-10-11 04:10:21.378 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Oct 11 04:10:25 compute-0 nova_compute[260089]: 2025-10-11 04:10:25.692 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Oct 11 04:10:26 compute-0 nova_compute[260089]: 2025-10-11 04:10:26.379 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Oct 11 04:10:30 compute-0 nova_compute[260089]: 2025-10-11 04:10:30.693 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Oct 11 04:10:31 compute-0 nova_compute[260089]: 2025-10-11 04:10:31.381 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Oct 11 04:10:35 compute-0 nova_compute[260089]: 2025-10-11 04:10:35.695 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Oct 11 04:10:36 compute-0 nova_compute[260089]: 2025-10-11 04:10:36.384 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m

● edpm_ovn_controller.service - ovn_controller container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_controller.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-10-11 03:30:03 UTC; 40min ago
   Main PID: 152667 (conmon)
         IO: 0B read, 135.5K written
      Tasks: 1 (limit: 48573)
     Memory: 696.0K (peak: 18.9M)
        CPU: 325ms
     CGroup: /system.slice/edpm_ovn_controller.service
             └─152667 /usr/bin/conmon --api-version 1 -c 65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2 -u 65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2/userdata -p /run/containers/storage/overlay-containers/65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2

Oct 11 04:07:57 compute-0 ovn_controller[152667]: 2025-10-11T04:07:57Z|00075|pinctrl(ovn_pinctrl0)|INFO|DHCPNAK fa:16:3e:55:04:a0 10.100.0.14
Oct 11 04:08:01 compute-0 ovn_controller[152667]: 2025-10-11T04:08:01Z|00076|pinctrl(ovn_pinctrl0)|WARN|DHCPREQUEST requested IP 10.100.0.13 does not match offer 10.100.0.14
Oct 11 04:08:01 compute-0 ovn_controller[152667]: 2025-10-11T04:08:01Z|00077|pinctrl(ovn_pinctrl0)|INFO|DHCPNAK fa:16:3e:55:04:a0 10.100.0.14
Oct 11 04:08:02 compute-0 ovn_controller[152667]: 2025-10-11T04:08:02Z|00078|pinctrl(ovn_pinctrl0)|INFO|DHCPOFFER fa:16:3e:55:04:a0 10.100.0.14
Oct 11 04:08:02 compute-0 ovn_controller[152667]: 2025-10-11T04:08:02Z|00079|pinctrl(ovn_pinctrl0)|INFO|DHCPACK fa:16:3e:55:04:a0 10.100.0.14
Oct 11 04:08:18 compute-0 ovn_controller[152667]: 2025-10-11T04:08:18Z|00280|memory_trim|INFO|Detected inactivity (last active 30001 ms ago): trimming memory
Oct 11 04:08:21 compute-0 ovn_controller[152667]: 2025-10-11T04:08:21Z|00281|binding|INFO|Releasing lport 1cecff65-5dca-4e92-9f18-a4729f87c434 from this chassis (sb_readonly=0)
Oct 11 04:08:21 compute-0 ovn_controller[152667]: 2025-10-11T04:08:21Z|00282|binding|INFO|Setting lport 1cecff65-5dca-4e92-9f18-a4729f87c434 down in Southbound
Oct 11 04:08:21 compute-0 ovn_controller[152667]: 2025-10-11T04:08:21Z|00283|binding|INFO|Removing iface tap1cecff65-5d ovn-installed in OVS
Oct 11 04:09:12 compute-0 ovn_controller[152667]: 2025-10-11T04:09:12Z|00284|memory_trim|INFO|Detected inactivity (last active 30009 ms ago): trimming memory

● edpm_ovn_metadata_agent.service - ovn_metadata_agent container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_metadata_agent.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-10-11 03:31:09 UTC; 39min ago
   Main PID: 162240 (conmon)
         IO: 0B read, 102.0K written
      Tasks: 1 (limit: 48573)
     Memory: 720.0K (peak: 18.6M)
        CPU: 481ms
     CGroup: /system.slice/edpm_ovn_metadata_agent.service
             └─162240 /usr/bin/conmon --api-version 1 -c dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab -u dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab/userdata -p /run/containers/storage/overlay-containers/dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab

Oct 11 04:08:21 compute-0 ovn_metadata_agent[162240]: 2025-10-11 04:08:21.489 162666 DEBUG oslo.privsep.daemon [-] privsep: reply[4de7e29b-51f1-4248-94f0-1dbbf3209e18]: (4, None) _call_back /usr/lib/python3.9/site-packages/oslo_privsep/daemon.py:501[00m
Oct 11 04:08:25 compute-0 ovn_metadata_agent[162240]: 2025-10-11 04:08:25.069 162245 DEBUG ovsdbapp.backend.ovs_idl.event [-] Matched UPDATE: SbGlobalUpdateEvent(events=('update',), table='SB_Global', conditions=None, old_conditions=None), priority=20 to row=SB_Global(external_ids={}, nb_cfg=24, options={'arp_ns_explicit_output': 'true', 'mac_prefix': '2e:30:f4', 'max_tunid': '16711680', 'northd_internal_version': '24.03.7-20.33.0-76.8', 'svc_monitor_mac': 'fe:89:7c:57:3f:71'}, ipsec=False) old=SB_Global(nb_cfg=23) matches /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/event.py:43[00m
Oct 11 04:08:25 compute-0 ovn_metadata_agent[162240]: 2025-10-11 04:08:25.070 162245 DEBUG neutron.agent.ovn.metadata.agent [-] Delaying updating chassis table for 7 seconds run /usr/lib/python3.9/site-packages/neutron/agent/ovn/metadata/agent.py:274[00m
Oct 11 04:08:32 compute-0 ovn_metadata_agent[162240]: 2025-10-11 04:08:32.072 162245 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Chassis_Private, record=14b06507-d00b-4e27-a47d-46a5c2644635, col_values=(('external_ids', {'neutron:ovn-metadata-sb-cfg': '24'}),), if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89[00m
Oct 11 04:09:10 compute-0 ovn_metadata_agent[162240]: 2025-10-11 04:09:10.513 162245 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Oct 11 04:09:10 compute-0 ovn_metadata_agent[162240]: 2025-10-11 04:09:10.514 162245 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Oct 11 04:09:10 compute-0 ovn_metadata_agent[162240]: 2025-10-11 04:09:10.514 162245 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Oct 11 04:10:10 compute-0 ovn_metadata_agent[162240]: 2025-10-11 04:10:10.514 162245 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Oct 11 04:10:10 compute-0 ovn_metadata_agent[162240]: 2025-10-11 04:10:10.516 162245 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Oct 11 04:10:10 compute-0 ovn_metadata_agent[162240]: 2025-10-11 04:10:10.516 162245 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m

○ emergency.service - Emergency Shell
     Loaded: loaded (/usr/lib/systemd/system/emergency.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

○ f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d-38b9144b98f6cb45.service - /usr/bin/podman healthcheck run f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d
     Loaded: loaded (/run/systemd/transient/f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d-38b9144b98f6cb45.service; transient)
  Transient: yes
     Active: inactive (dead) since Sat 2025-10-11 04:10:25 UTC; 13s ago
   Duration: 76ms
TriggeredBy: ● f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d-38b9144b98f6cb45.timer
    Process: 314601 ExecStart=/usr/bin/podman healthcheck run f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d (code=exited, status=0/SUCCESS)
   Main PID: 314601 (code=exited, status=0/SUCCESS)
        CPU: 74ms

Oct 11 04:10:24 compute-0 podman[314601]: 2025-10-11 04:10:24.996846754 +0000 UTC m=+0.058202572 container health_status f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d (image=quay.io/podified-antelope-centos9/openstack-iscsid:current-podified, name=iscsid, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, config_id=iscsid, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/iscsid', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-iscsid:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run:/run', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:z', '/etc/target:/etc/target:z', '/var/lib/iscsi:/var/lib/iscsi:z', '/var/lib/openstack/healthchecks/iscsid:/openstack:ro,z']}, container_name=iscsid, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2)
Unit hv_kvp_daemon.service could not be found.

● getty@tty1.service - Getty on tty1
     Loaded: loaded (/usr/lib/systemd/system/getty@.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-10-11 02:42:21 UTC; 1h 28min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1008 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48573)
     Memory: 220.0K (peak: 692.0K)
        CPU: 14ms
     CGroup: /system.slice/system-getty.slice/getty@tty1.service
             └─1008 /sbin/agetty -o "-p -- \\u" --noclear - linux

Oct 11 02:42:21 np0005480824.novalocal systemd[1]: Started Getty on tty1.

● gssproxy.service - GSSAPI Proxy Daemon
     Loaded: loaded (/usr/lib/systemd/system/gssproxy.service; disabled; preset: disabled)
     Active: active (running) since Sat 2025-10-11 02:42:20 UTC; 1h 28min ago
   Main PID: 874 (gssproxy)
         IO: 0B read, 0B written
      Tasks: 6 (limit: 48573)
     Memory: 1.8M (peak: 3.4M)
        CPU: 28ms
     CGroup: /system.slice/gssproxy.service
             └─874 /usr/sbin/gssproxy -D

Oct 11 02:42:20 np0005480824.novalocal systemd[1]: Starting GSSAPI Proxy Daemon...
Oct 11 02:42:20 np0005480824.novalocal systemd[1]: Started GSSAPI Proxy Daemon.

○ import-state.service - Import network configuration from initramfs
     Loaded: loaded (/usr/lib/systemd/system/import-state.service; enabled; preset: enabled)
     Active: inactive (dead)

○ initrd-cleanup.service - Cleaning Up and Shutting Down Daemons
     Loaded: loaded (/usr/lib/systemd/system/initrd-cleanup.service; static)
     Active: inactive (dead) since Sat 2025-10-11 02:42:16 UTC; 1h 28min ago
   Main PID: 612 (code=exited, status=0/SUCCESS)
        CPU: 16ms

Oct 11 02:42:16 localhost systemd[1]: Starting Cleaning Up and Shutting Down Daemons...
Oct 11 02:42:16 localhost systemd[1]: initrd-cleanup.service: Deactivated successfully.
Oct 11 02:42:16 localhost systemd[1]: Finished Cleaning Up and Shutting Down Daemons.

○ initrd-parse-etc.service - Mountpoints Configured in the Real Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-parse-etc.service; static)
     Active: inactive (dead) since Sat 2025-10-11 02:42:16 UTC; 1h 28min ago
   Main PID: 564 (code=exited, status=0/SUCCESS)
        CPU: 25ms

Oct 11 02:42:16 localhost systemd[1]: Starting Mountpoints Configured in the Real Root...
Oct 11 02:42:16 localhost systemd[1]: initrd-parse-etc.service: Deactivated successfully.
Oct 11 02:42:16 localhost systemd[1]: Finished Mountpoints Configured in the Real Root.

○ initrd-switch-root.service - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.service; static)
     Active: inactive (dead) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
   Main PID: 617 (code=exited, status=0/SUCCESS)
        CPU: 11ms

Oct 11 02:42:16 localhost systemd[1]: Starting Switch Root...

○ initrd-udevadm-cleanup-db.service - Cleanup udev Database
     Loaded: loaded (/usr/lib/systemd/system/initrd-udevadm-cleanup-db.service; static)
     Active: inactive (dead) since Sat 2025-10-11 02:42:16 UTC; 1h 28min ago
   Main PID: 615 (code=exited, status=0/SUCCESS)
        CPU: 19ms

Oct 11 02:42:16 localhost systemd[1]: Starting Cleanup udev Database...
Oct 11 02:42:16 localhost systemd[1]: initrd-udevadm-cleanup-db.service: Deactivated successfully.
Oct 11 02:42:16 localhost systemd[1]: Finished Cleanup udev Database.

○ ip6tables.service - IPv6 firewall with ip6tables
     Loaded: loaded (/usr/lib/systemd/system/ip6tables.service; disabled; preset: disabled)
     Active: inactive (dead)

○ iptables.service - IPv4 firewall with iptables
     Loaded: loaded (/usr/lib/systemd/system/iptables.service; disabled; preset: disabled)
     Active: inactive (dead) since Sat 2025-10-11 03:15:12 UTC; 55min ago
   Duration: 32min 53.004s
   Main PID: 776 (code=exited, status=0/SUCCESS)
        CPU: 155ms

Oct 11 02:42:18 localhost systemd[1]: Starting IPv4 firewall with iptables...
Oct 11 02:42:18 localhost iptables.init[776]: iptables: Applying firewall rules: [  OK  ]
Oct 11 02:42:18 localhost systemd[1]: Finished IPv4 firewall with iptables.
Oct 11 03:15:11 compute-0 systemd[1]: Stopping IPv4 firewall with iptables...
Oct 11 03:15:12 compute-0 iptables.init[58624]: iptables: Setting chains to policy ACCEPT: raw mangle filter nat [  OK  ]
Oct 11 03:15:12 compute-0 iptables.init[58624]: iptables: Flushing firewall rules: [  OK  ]
Oct 11 03:15:12 compute-0 systemd[1]: iptables.service: Deactivated successfully.
Oct 11 03:15:12 compute-0 systemd[1]: Stopped IPv4 firewall with iptables.

● irqbalance.service - irqbalance daemon
     Loaded: loaded (/usr/lib/systemd/system/irqbalance.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
       Docs: man:irqbalance(1)
             https://github.com/Irqbalance/irqbalance
   Main PID: 777 (irqbalance)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48573)
     Memory: 1.1M (peak: 1.5M)
        CPU: 531ms
     CGroup: /system.slice/irqbalance.service
             └─777 /usr/sbin/irqbalance

Oct 11 02:42:29 np0005480824.novalocal irqbalance[777]: Cannot change IRQ 32 affinity: Operation not permitted
Oct 11 02:42:29 np0005480824.novalocal irqbalance[777]: IRQ 32 affinity is now unmanaged
Oct 11 02:42:29 np0005480824.novalocal irqbalance[777]: Cannot change IRQ 30 affinity: Operation not permitted
Oct 11 02:42:29 np0005480824.novalocal irqbalance[777]: IRQ 30 affinity is now unmanaged
Oct 11 02:42:29 np0005480824.novalocal irqbalance[777]: Cannot change IRQ 29 affinity: Operation not permitted
Oct 11 02:42:29 np0005480824.novalocal irqbalance[777]: IRQ 29 affinity is now unmanaged
Oct 11 02:53:09 compute-0 irqbalance[777]: Cannot change IRQ 27 affinity: Operation not permitted
Oct 11 02:53:09 compute-0 irqbalance[777]: IRQ 27 affinity is now unmanaged
Oct 11 03:08:09 compute-0 irqbalance[777]: Cannot change IRQ 26 affinity: Operation not permitted
Oct 11 03:08:09 compute-0 irqbalance[777]: IRQ 26 affinity is now unmanaged

○ iscsi-init.service - One time configuration for iscsi.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-init.service; disabled; preset: disabled)
     Active: inactive (dead)

○ iscsi-onboot.service - Special handling of early boot iSCSI sessions
     Loaded: loaded (/usr/lib/systemd/system/iscsi-onboot.service; enabled; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

○ iscsi-shutdown.service - Logout off all iSCSI sessions on shutdown
     Loaded: loaded (/usr/lib/systemd/system/iscsi-shutdown.service; static)
     Active: inactive (dead)
       Docs: man:iscsid(8)
             man:iscsiadm(8)

○ iscsi.service - Login and scanning of iSCSI devices
     Loaded: loaded (/usr/lib/systemd/system/iscsi.service; indirect; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

Oct 11 03:35:57 compute-0 systemd[1]: iscsi.service: Unit cannot be reloaded because it is inactive.

○ iscsid.service - Open-iSCSI
     Loaded: loaded (/usr/lib/systemd/system/iscsid.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ iscsid.socket
       Docs: man:iscsid(8)
             man:iscsiuio(8)
             man:iscsiadm(8)

○ iscsiuio.service - iSCSI UserSpace I/O driver
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ iscsiuio.socket
       Docs: man:iscsiuio(8)

● kmod-static-nodes.service - Create List of Static Device Nodes
     Loaded: loaded (/usr/lib/systemd/system/kmod-static-nodes.service; static)
     Active: active (exited) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
   Main PID: 668 (code=exited, status=0/SUCCESS)
        CPU: 7ms

● ldconfig.service - Rebuild Dynamic Linker Cache
     Loaded: loaded (/usr/lib/systemd/system/ldconfig.service; static)
     Active: active (exited) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
       Docs: man:ldconfig(8)
   Main PID: 693 (code=exited, status=0/SUCCESS)
        CPU: 80ms

Oct 11 02:42:18 localhost systemd[1]: Starting Rebuild Dynamic Linker Cache...
Oct 11 02:42:18 localhost systemd[1]: Finished Rebuild Dynamic Linker Cache.

○ libvirtd.service
     Loaded: masked (Reason: Unit libvirtd.service is masked.)
     Active: inactive (dead)
TriggeredBy: ○ libvirtd.socket
             ○ libvirtd-admin.socket
             ○ libvirtd-ro.socket

○ loadmodules.service - Load legacy module configuration
     Loaded: loaded (/usr/lib/systemd/system/loadmodules.service; enabled; preset: enabled)
     Active: inactive (dead)

○ logrotate.service - Rotate log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.service; static)
     Active: inactive (dead)
TriggeredBy: ● logrotate.timer
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

○ lvm2-lvmpolld.service - LVM2 poll daemon
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.service; static)
     Active: inactive (dead)
TriggeredBy: ● lvm2-lvmpolld.socket
       Docs: man:lvmpolld(8)

● lvm2-monitor.service - Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling
     Loaded: loaded (/usr/lib/systemd/system/lvm2-monitor.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2025-10-11 03:08:57 UTC; 1h 1min ago
       Docs: man:dmeventd(8)
             man:lvcreate(8)
             man:lvchange(8)
             man:vgchange(8)
   Main PID: 30541 (code=exited, status=0/SUCCESS)
        CPU: 20ms

Oct 11 03:08:57 compute-0 systemd[1]: Starting Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling...
Oct 11 03:08:57 compute-0 systemd[1]: Finished Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling.

○ mdmonitor.service - Software RAID monitoring and management
     Loaded: loaded (/usr/lib/systemd/system/mdmonitor.service; enabled; preset: enabled)
     Active: inactive (dead)

○ microcode.service - Load CPU microcode update
     Loaded: loaded (/usr/lib/systemd/system/microcode.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-10-11 02:42:18 UTC; 1h 28min ago

Oct 11 02:42:18 localhost systemd[1]: Load CPU microcode update was skipped because of an unmet condition check (ConditionPathExists=/sys/devices/system/cpu/microcode/reload).

○ modprobe@configfs.service - Load Kernel Module configfs
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
       Docs: man:modprobe(8)
   Main PID: 746 (code=exited, status=0/SUCCESS)
        CPU: 5ms

Oct 11 02:42:18 localhost systemd[1]: Starting Load Kernel Module configfs...
Oct 11 02:42:18 localhost systemd[1]: modprobe@configfs.service: Deactivated successfully.
Oct 11 02:42:18 localhost systemd[1]: Finished Load Kernel Module configfs.

○ modprobe@drm.service - Load Kernel Module drm
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:modprobe(8)
   Main PID: 670 (code=exited, status=0/SUCCESS)
        CPU: 92ms

Oct 11 02:42:17 localhost systemd[1]: modprobe@drm.service: Deactivated successfully.
Oct 11 02:42:17 localhost systemd[1]: Finished Load Kernel Module drm.

○ modprobe@efi_pstore.service - Load Kernel Module efi_pstore
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:modprobe(8)
   Main PID: 671 (code=exited, status=0/SUCCESS)
        CPU: 5ms

Oct 11 02:42:17 localhost systemd[1]: modprobe@efi_pstore.service: Deactivated successfully.
Oct 11 02:42:17 localhost systemd[1]: Finished Load Kernel Module efi_pstore.

○ modprobe@fuse.service - Load Kernel Module fuse
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:modprobe(8)
   Main PID: 672 (code=exited, status=0/SUCCESS)
        CPU: 46ms

Oct 11 02:42:17 localhost systemd[1]: modprobe@fuse.service: Deactivated successfully.
Oct 11 02:42:17 localhost systemd[1]: Finished Load Kernel Module fuse.

○ netns-placeholder.service - Create netns directory
     Loaded: loaded (/etc/systemd/system/netns-placeholder.service; enabled; preset: enabled)
     Active: inactive (dead) since Sat 2025-10-11 03:37:04 UTC; 33min ago
   Main PID: 237689 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Oct 11 03:37:04 compute-0 systemd[1]: Starting Create netns directory...
Oct 11 03:37:04 compute-0 systemd[1]: netns-placeholder.service: Deactivated successfully.
Oct 11 03:37:04 compute-0 systemd[1]: Finished Create netns directory.

○ network.service - LSB: Bring up/down networking
     Loaded: loaded (/etc/rc.d/init.d/network; generated)
     Active: inactive (dead)
       Docs: man:systemd-sysv-generator(8)

● NetworkManager-wait-online.service - Network Manager Wait Online
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager-wait-online.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-10-11 03:11:42 UTC; 58min ago
       Docs: man:NetworkManager-wait-online.service(8)
   Main PID: 44978 (code=exited, status=0/SUCCESS)
        CPU: 32ms

Oct 11 03:11:42 compute-0 systemd[1]: Starting Network Manager Wait Online...
Oct 11 03:11:42 compute-0 systemd[1]: Finished Network Manager Wait Online.

● NetworkManager.service - Network Manager
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager.service; enabled; preset: enabled)
    Drop-In: /usr/lib/systemd/system/NetworkManager.service.d
             └─NetworkManager-ovs.conf
     Active: active (running) since Sat 2025-10-11 03:11:42 UTC; 58min ago
       Docs: man:NetworkManager(8)
   Main PID: 44969 (NetworkManager)
         IO: 104.0K read, 328.5K written
      Tasks: 3 (limit: 48573)
     Memory: 5.6M (peak: 6.6M)
        CPU: 24.039s
     CGroup: /system.slice/NetworkManager.service
             └─44969 /usr/sbin/NetworkManager --no-daemon

Oct 11 04:06:56 compute-0 NetworkManager[44969]: <info>  [1760155616.6967] manager: (tapabadcf46-90): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/145)
Oct 11 04:07:22 compute-0 NetworkManager[44969]: <info>  [1760155642.2315] device (tap7452b5ba-83): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')
Oct 11 04:07:40 compute-0 NetworkManager[44969]: <info>  [1760155660.1104] manager: (tap1cecff65-5d): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/146)
Oct 11 04:07:41 compute-0 NetworkManager[44969]: <info>  [1760155661.3642] manager: (tap1cecff65-5d): new Tun device (/org/freedesktop/NetworkManager/Devices/147)
Oct 11 04:07:41 compute-0 NetworkManager[44969]: <info>  [1760155661.4409] device (tap1cecff65-5d): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Oct 11 04:07:41 compute-0 NetworkManager[44969]: <info>  [1760155661.4420] device (tap1cecff65-5d): state change: unavailable -> disconnected (reason 'none', managed-type: 'external')
Oct 11 04:07:41 compute-0 NetworkManager[44969]: <info>  [1760155661.5099] manager: (tapabadcf46-90): new Veth device (/org/freedesktop/NetworkManager/Devices/148)
Oct 11 04:07:41 compute-0 NetworkManager[44969]: <info>  [1760155661.5589] device (tapabadcf46-90): carrier: link connected
Oct 11 04:07:41 compute-0 NetworkManager[44969]: <info>  [1760155661.7014] manager: (tapabadcf46-90): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/149)
Oct 11 04:08:21 compute-0 NetworkManager[44969]: <info>  [1760155701.1451] device (tap1cecff65-5d): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')

Unit ntpd.service could not be found.
Unit ntpdate.service could not be found.
○ nfs-idmapd.service - NFSv4 ID-name mapping service
     Loaded: loaded (/usr/lib/systemd/system/nfs-idmapd.service; static)
     Active: inactive (dead)
       Docs: man:idmapd(8)

○ nfs-mountd.service - NFS Mount Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfs-mountd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.mountd(8)

○ nfs-server.service - NFS server and services
     Loaded: loaded (/usr/lib/systemd/system/nfs-server.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:rpc.nfsd(8)
             man:exportfs(8)

○ nfs-utils.service - NFS server and client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-utils.service; static)
     Active: inactive (dead)

○ nfsdcld.service - NFSv4 Client Tracking Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfsdcld.service; static)
     Active: inactive (dead)
       Docs: man:nfsdcld(8)

● nftables.service - Netfilter Tables
     Loaded: loaded (/usr/lib/systemd/system/nftables.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-10-11 03:15:14 UTC; 55min ago
       Docs: man:nft(8)
   Main PID: 59014 (code=exited, status=0/SUCCESS)
        CPU: 40ms

Oct 11 03:15:14 compute-0 systemd[1]: Starting Netfilter Tables...
Oct 11 03:15:14 compute-0 systemd[1]: Finished Netfilter Tables.

● nis-domainname.service - Read and set NIS domainname from /etc/sysconfig/network
     Loaded: loaded (/usr/lib/systemd/system/nis-domainname.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
   Main PID: 673 (code=exited, status=0/SUCCESS)
        CPU: 6ms

Oct 11 02:42:17 localhost systemd[1]: Finished Read and set NIS domainname from /etc/sysconfig/network.

○ nvmefc-boot-connections.service - Auto-connect to subsystems on FC-NVME devices found during boot
     Loaded: loaded (/usr/lib/systemd/system/nvmefc-boot-connections.service; enabled; preset: enabled)
     Active: inactive (dead)

● openvswitch.service - Open vSwitch
     Loaded: loaded (/usr/lib/systemd/system/openvswitch.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2025-10-11 03:11:27 UTC; 59min ago
   Main PID: 43274 (code=exited, status=0/SUCCESS)
        CPU: 4ms

Oct 11 03:11:27 compute-0 systemd[1]: Starting Open vSwitch...
Oct 11 03:11:27 compute-0 systemd[1]: Finished Open vSwitch.

● ovs-delete-transient-ports.service - Open vSwitch Delete Transient Ports
     Loaded: loaded (/usr/lib/systemd/system/ovs-delete-transient-ports.service; static)
     Active: active (exited) since Sat 2025-10-11 03:11:26 UTC; 59min ago
   Main PID: 43207 (code=exited, status=0/SUCCESS)
        CPU: 51ms

Oct 11 03:11:26 compute-0 systemd[1]: Starting Open vSwitch Delete Transient Ports...
Oct 11 03:11:26 compute-0 systemd[1]: Finished Open vSwitch Delete Transient Ports.

● ovs-vswitchd.service - Open vSwitch Forwarding Unit
     Loaded: loaded (/usr/lib/systemd/system/ovs-vswitchd.service; static)
     Active: active (running) since Sat 2025-10-11 03:11:27 UTC; 59min ago
   Main PID: 43265 (ovs-vswitchd)
         IO: 3.7M read, 440.0K written
      Tasks: 13 (limit: 48573)
     Memory: 246.8M (peak: 248.6M)
        CPU: 20.615s
     CGroup: /system.slice/ovs-vswitchd.service
             └─43265 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach

Oct 11 03:11:26 compute-0 systemd[1]: Starting Open vSwitch Forwarding Unit...
Oct 11 03:11:27 compute-0 ovs-ctl[43251]: Inserting openvswitch module [  OK  ]
Oct 11 03:11:27 compute-0 ovs-ctl[43220]: Starting ovs-vswitchd [  OK  ]
Oct 11 03:11:27 compute-0 ovs-vsctl[43272]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0
Oct 11 03:11:27 compute-0 ovs-ctl[43220]: Enabling remote OVSDB managers [  OK  ]
Oct 11 03:11:27 compute-0 systemd[1]: Started Open vSwitch Forwarding Unit.
Unit pacemaker.service could not be found.
Unit plymouth-quit-wait.service could not be found.
Unit plymouth-start.service could not be found.

● ovsdb-server.service - Open vSwitch Database Unit
     Loaded: loaded (/usr/lib/systemd/system/ovsdb-server.service; static)
     Active: active (running) since Sat 2025-10-11 03:11:26 UTC; 59min ago
   Main PID: 43180 (ovsdb-server)
         IO: 668.0K read, 716.0K written
      Tasks: 1 (limit: 48573)
     Memory: 4.4M (peak: 40.3M)
        CPU: 22.304s
     CGroup: /system.slice/ovsdb-server.service
             └─43180 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach

Oct 11 03:11:26 compute-0 chown[43127]: /usr/bin/chown: cannot access '/run/openvswitch': No such file or directory
Oct 11 03:11:26 compute-0 ovs-ctl[43132]: /etc/openvswitch/conf.db does not exist ... (warning).
Oct 11 03:11:26 compute-0 ovs-ctl[43132]: Creating empty database /etc/openvswitch/conf.db [  OK  ]
Oct 11 03:11:26 compute-0 ovs-ctl[43132]: Starting ovsdb-server [  OK  ]
Oct 11 03:11:26 compute-0 ovs-vsctl[43181]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait -- init -- set Open_vSwitch . db-version=8.5.1
Oct 11 03:11:26 compute-0 ovs-vsctl[43200]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait set Open_vSwitch . ovs-version=3.3.5-115.el9s "external-ids:system-id=\"14b06507-d00b-4e27-a47d-46a5c2644635\"" "external-ids:rundir=\"/var/run/openvswitch\"" "system-type=\"centos\"" "system-version=\"9\""
Oct 11 03:11:26 compute-0 ovs-ctl[43132]: Configuring Open vSwitch system IDs [  OK  ]
Oct 11 03:11:26 compute-0 ovs-ctl[43132]: Enabling remote OVSDB managers [  OK  ]
Oct 11 03:11:26 compute-0 systemd[1]: Started Open vSwitch Database Unit.
Oct 11 03:11:26 compute-0 ovs-vsctl[43206]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0

● polkit.service - Authorization Manager
     Loaded: loaded (/usr/lib/systemd/system/polkit.service; static)
     Active: active (running) since Sat 2025-10-11 02:52:17 UTC; 1h 18min ago
       Docs: man:polkit(8)
   Main PID: 6205 (polkitd)
         IO: 11.4M read, 0B written
      Tasks: 12 (limit: 48573)
     Memory: 17.4M (peak: 19.1M)
        CPU: 2.396s
     CGroup: /system.slice/polkit.service
             └─6205 /usr/lib/polkit-1/polkitd --no-debug

Oct 11 03:33:27 compute-0 polkitd[6205]: Collecting garbage unconditionally...
Oct 11 03:33:27 compute-0 polkitd[6205]: Loading rules from directory /etc/polkit-1/rules.d
Oct 11 03:33:27 compute-0 polkitd[6205]: Loading rules from directory /usr/share/polkit-1/rules.d
Oct 11 03:33:27 compute-0 polkitd[6205]: Finished loading, compiling and executing 4 rules
Oct 11 03:35:04 compute-0 polkitd[6205]: Registered Authentication Agent for unix-process:216337:317251 (system bus name :1.2993 [/usr/bin/pkttyagent --process 216337 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Oct 11 03:35:04 compute-0 polkitd[6205]: Unregistered Authentication Agent for unix-process:216337:317251 (system bus name :1.2993, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Oct 11 03:35:04 compute-0 polkitd[6205]: Registered Authentication Agent for unix-process:216336:317250 (system bus name :1.2994 [/usr/bin/pkttyagent --process 216336 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Oct 11 03:35:04 compute-0 polkitd[6205]: Unregistered Authentication Agent for unix-process:216336:317250 (system bus name :1.2994, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Oct 11 03:35:07 compute-0 polkitd[6205]: Registered Authentication Agent for unix-process:216803:317495 (system bus name :1.2997 [/usr/bin/pkttyagent --process 216803 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Unit power-profiles-daemon.service could not be found.
Unit rpc-svcgssd.service could not be found.
Oct 11 03:35:07 compute-0 polkitd[6205]: Unregistered Authentication Agent for unix-process:216803:317495 (system bus name :1.2997, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)

○ raid-check.service - RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.service; static)
     Active: inactive (dead)
TriggeredBy: ○ raid-check.timer

○ rc-local.service - /etc/rc.d/rc.local Compatibility
     Loaded: loaded (/usr/lib/systemd/system/rc-local.service; static)
     Active: inactive (dead)
       Docs: man:systemd-rc-local-generator(8)

○ rescue.service - Rescue Shell
     Loaded: loaded (/usr/lib/systemd/system/rescue.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

○ rpc-gssd.service - RPC security service for NFS client and server
     Loaded: loaded (/usr/lib/systemd/system/rpc-gssd.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-10-11 02:42:20 UTC; 1h 28min ago
       Docs: man:rpc.gssd(8)

Oct 11 02:42:20 np0005480824.novalocal systemd[1]: RPC security service for NFS client and server was skipped because of an unmet condition check (ConditionPathExists=/etc/krb5.keytab).

● rpc-statd-notify.service - Notify NFS peers of a restart
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd-notify.service; static)
     Active: active (exited) since Sat 2025-10-11 02:42:21 UTC; 1h 28min ago
       Docs: man:sm-notify(8)
             man:rpc.statd(8)
        CPU: 10ms

Oct 11 02:42:21 np0005480824.novalocal sm-notify[1003]: Version 2.5.4 starting
Oct 11 02:42:21 np0005480824.novalocal systemd[1]: Starting Notify NFS peers of a restart...
Oct 11 02:42:21 np0005480824.novalocal systemd[1]: Started Notify NFS peers of a restart.

○ rpc-statd.service - NFS status monitor for NFSv2/3 locking.
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.statd(8)

● rpcbind.service - RPC Bind
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
TriggeredBy: ● rpcbind.socket
       Docs: man:rpcbind(8)
   Main PID: 698 (rpcbind)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48573)
     Memory: 2.7M (peak: 3.0M)
        CPU: 44ms
     CGroup: /system.slice/rpcbind.service
             └─698 /usr/bin/rpcbind -w -f

Oct 11 02:42:18 localhost systemd[1]: Starting RPC Bind...
Oct 11 02:42:18 localhost systemd[1]: Started RPC Bind.

● rsyslog.service - System Logging Service
     Loaded: loaded (/usr/lib/systemd/system/rsyslog.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-10-11 02:42:22 UTC; 1h 28min ago
       Docs: man:rsyslogd(8)
             https://www.rsyslog.com/doc/
   Main PID: 1004 (rsyslogd)
         IO: 4.0K read, 22.7M written
      Tasks: 3 (limit: 48573)
     Memory: 20.1M (peak: 20.7M)
        CPU: 14.623s
     CGroup: /system.slice/rsyslog.service
             └─1004 /usr/sbin/rsyslogd -n

Oct 11 03:39:18 compute-0 rsyslogd[1004]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 03:39:18 compute-0 rsyslogd[1004]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 03:46:35 compute-0 rsyslogd[1004]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 03:46:35 compute-0 rsyslogd[1004]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 03:54:10 compute-0 rsyslogd[1004]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 04:03:26 compute-0 rsyslogd[1004]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 04:03:26 compute-0 rsyslogd[1004]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Unit sntp.service could not be found.
Unit sshd-keygen.service could not be found.
Oct 11 04:10:05 compute-0 rsyslogd[1004]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]
Oct 11 04:10:10 compute-0 rsyslogd[1004]: imjournal from <np0005480824:ceph-osd>: begin to drop messages due to rate-limiting
Oct 11 04:10:10 compute-0 rsyslogd[1004]: imjournal: journal files changed, reloading...  [v8.2506.0-2.el9 try https://www.rsyslog.com/e/0 ]

○ selinux-autorelabel-mark.service - Mark the need to relabel after reboot
     Loaded: loaded (/usr/lib/systemd/system/selinux-autorelabel-mark.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-10-11 02:42:18 UTC; 1h 28min ago

Oct 11 02:42:18 localhost systemd[1]: Mark the need to relabel after reboot was skipped because of an unmet condition check (ConditionSecurity=!selinux).

● serial-getty@ttyS0.service - Serial Getty on ttyS0
     Loaded: loaded (/usr/lib/systemd/system/serial-getty@.service; enabled-runtime; preset: disabled)
     Active: active (running) since Sat 2025-10-11 02:42:21 UTC; 1h 28min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1009 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48573)
     Memory: 300.0K (peak: 544.0K)
        CPU: 12ms
     CGroup: /system.slice/system-serial\x2dgetty.slice/serial-getty@ttyS0.service
             └─1009 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

Oct 11 02:42:21 np0005480824.novalocal systemd[1]: Started Serial Getty on ttyS0.

○ sshd-keygen@ecdsa.service - OpenSSH ecdsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-10-11 03:33:32 UTC; 37min ago

Oct 11 02:42:18 localhost systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Oct 11 03:33:32 compute-0 systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@ed25519.service - OpenSSH ed25519 Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-10-11 03:33:32 UTC; 37min ago

Oct 11 02:42:18 localhost systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Oct 11 03:33:32 compute-0 systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@rsa.service - OpenSSH rsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-10-11 03:33:32 UTC; 37min ago

Oct 11 02:42:18 localhost systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Oct 11 03:33:32 compute-0 systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Unit syslog.service could not be found.

● sshd.service - OpenSSH server daemon
     Loaded: loaded (/usr/lib/systemd/system/sshd.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-10-11 03:33:32 UTC; 37min ago
       Docs: man:sshd(8)
             man:sshd_config(5)
   Main PID: 189518 (sshd)
         IO: 124.0K read, 60.0K written
      Tasks: 1 (limit: 48573)
     Memory: 3.5M (peak: 9.7M)
        CPU: 724ms
     CGroup: /system.slice/sshd.service
             └─189518 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"

Oct 11 03:57:58 compute-0 sshd-session[292811]: pam_unix(sshd:auth): authentication failure; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.103  user=root
Oct 11 03:58:00 compute-0 sshd-session[292811]: Failed password for root from 193.46.255.103 port 14426 ssh2
Oct 11 03:58:02 compute-0 sshd-session[292811]: Failed password for root from 193.46.255.103 port 14426 ssh2
Oct 11 03:58:04 compute-0 sshd-session[292811]: Failed password for root from 193.46.255.103 port 14426 ssh2
Oct 11 03:58:05 compute-0 sshd-session[292811]: Received disconnect from 193.46.255.103 port 14426:11:  [preauth]
Oct 11 03:58:05 compute-0 sshd-session[292811]: Disconnected from authenticating user root 193.46.255.103 port 14426 [preauth]
Oct 11 03:58:05 compute-0 sshd-session[292811]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=193.46.255.103  user=root
Oct 11 04:07:58 compute-0 sshd[189518]: Timeout before authentication for connection from 58.242.127.33 to 38.102.83.68, pid = 304564
Oct 11 04:09:47 compute-0 sshd-session[308926]: Accepted publickey for zuul from 192.168.122.10 port 33054 ssh2: ECDSA SHA256:Cp9T2GExxJAi5aHCaQiVqmIUIBBGBKezVodhGxcKx+w
Oct 11 04:09:47 compute-0 sshd-session[308926]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)

○ sssd-kcm.service - SSSD Kerberos Cache Manager
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.service; indirect; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ● sssd-kcm.socket
       Docs: man:sssd-kcm(5)

○ sssd.service - System Security Services Daemon
     Loaded: loaded (/usr/lib/systemd/system/sssd.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-10-11 02:42:18 UTC; 1h 28min ago

Oct 11 02:42:18 localhost systemd[1]: System Security Services Daemon was skipped because no trigger condition checks were met.

○ sysstat-collect.service - system activity accounting tool
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-collect.timer
       Docs: man:sa1(8)

○ sysstat-summary.service - Generate a daily summary of process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-summary.timer
       Docs: man:sa2(8)

○ sysstat.service - Resets System Activity Logs
     Loaded: loaded (/usr/lib/systemd/system/sysstat.service; enabled; preset: enabled)
     Active: inactive (dead)

○ systemd-ask-password-console.service - Dispatch Password Requests to Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-console.path
       Docs: man:systemd-ask-password-console.service(8)

○ systemd-ask-password-wall.service - Forward Password Requests to Wall
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-wall.path
       Docs: man:systemd-ask-password-wall.service(8)

○ systemd-binfmt.service - Set Up Additional Binary Formats
     Loaded: loaded (/usr/lib/systemd/system/systemd-binfmt.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
       Docs: man:systemd-binfmt.service(8)
             man:binfmt.d(5)
             https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Oct 11 02:42:18 localhost systemd[1]: Set Up Additional Binary Formats was skipped because no trigger condition checks were met.

○ systemd-boot-random-seed.service - Update Boot Loader Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-random-seed.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
       Docs: man:systemd-boot-random-seed.service(8)
             man:random(4)

Oct 11 02:42:18 localhost systemd[1]: Update Boot Loader Random Seed was skipped because no trigger condition checks were met.

● systemd-boot-update.service - Automatic Boot Loader Update
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-update.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
       Docs: man:bootctl(1)
   Main PID: 694 (code=exited, status=0/SUCCESS)
        CPU: 11ms

Oct 11 02:42:18 localhost systemd[1]: Starting Automatic Boot Loader Update...
Oct 11 02:42:18 localhost bootctl[694]: Couldn't find EFI system partition, skipping.
Oct 11 02:42:18 localhost systemd[1]: Finished Automatic Boot Loader Update.

○ systemd-firstboot.service - First Boot Wizard
     Loaded: loaded (/usr/lib/systemd/system/systemd-firstboot.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:systemd-firstboot(1)

Oct 11 02:42:17 localhost systemd[1]: First Boot Wizard was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ systemd-fsck-root.service - File System Check on Root Device
     Loaded: loaded (/usr/lib/systemd/system/systemd-fsck-root.service; static)
     Active: inactive (dead) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
   Duration: 1.893s
       Docs: man:systemd-fsck-root.service(8)
   Main PID: 548 (code=exited, status=0/SUCCESS)
        CPU: 23ms

Oct 11 02:42:15 localhost systemd[1]: Starting File System Check on /dev/disk/by-uuid/9839e2e1-98a2-4594-b609-79d514deb0a3...
Oct 11 02:42:15 localhost systemd-fsck[550]: /usr/sbin/fsck.xfs: XFS file system.
Oct 11 02:42:15 localhost systemd[1]: Finished File System Check on /dev/disk/by-uuid/9839e2e1-98a2-4594-b609-79d514deb0a3.

● systemd-hostnamed.service - Hostname Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-hostnamed.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-hostnamed.service.d
             └─disable-privatedevices.conf
     Active: active (running) since Sat 2025-10-11 04:10:16 UTC; 21s ago
       Docs: man:systemd-hostnamed.service(8)
             man:hostname(5)
             man:machine-info(5)
             man:org.freedesktop.resolve1(5)
   Main PID: 313130 (systemd-hostnam)
         IO: 24.0K read, 0B written
      Tasks: 1 (limit: 48573)
     Memory: 2.7M (peak: 3.8M)
        CPU: 113ms
     CGroup: /system.slice/systemd-hostnamed.service
             └─313130 /usr/lib/systemd/systemd-hostnamed

Oct 11 04:10:16 compute-0 systemd[1]: Starting Hostname Service...
Oct 11 04:10:16 compute-0 systemd[1]: Started Hostname Service.

● systemd-hwdb-update.service - Rebuild Hardware Database
     Loaded: loaded (/usr/lib/systemd/system/systemd-hwdb-update.service; static)
     Active: active (exited) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
       Docs: man:hwdb(7)
             man:systemd-hwdb(8)
   Main PID: 685 (code=exited, status=0/SUCCESS)
        CPU: 509ms

Oct 11 02:42:17 localhost systemd[1]: Starting Rebuild Hardware Database...
Oct 11 02:42:18 localhost systemd[1]: Finished Rebuild Hardware Database.

○ systemd-initctl.service - initctl Compatibility Daemon
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-initctl.socket
       Docs: man:systemd-initctl.service(8)

● systemd-journal-catalog-update.service - Rebuild Journal Catalog
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-catalog-update.service; static)
     Active: active (exited) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 699 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Oct 11 02:42:18 localhost systemd[1]: Starting Rebuild Journal Catalog...
Oct 11 02:42:18 localhost systemd[1]: Finished Rebuild Journal Catalog.

● systemd-journal-flush.service - Flush Journal to Persistent Storage
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-flush.service; static)
     Active: active (exited) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 686 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Oct 11 02:42:17 localhost systemd[1]: Starting Flush Journal to Persistent Storage...
Oct 11 02:42:17 localhost systemd[1]: Finished Flush Journal to Persistent Storage.

● systemd-journald.service - Journal Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.service; static)
     Active: active (running) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
TriggeredBy: ● systemd-journald.socket
             ● systemd-journald-dev-log.socket
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 674 (systemd-journal)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48573)
     Memory: 83.3M (peak: 91.4M)
        CPU: 16.176s
     CGroup: /system.slice/systemd-journald.service
             └─674 /usr/lib/systemd/systemd-journald

Oct 11 02:42:17 localhost systemd-journald[674]: Journal started
Oct 11 02:42:17 localhost systemd-journald[674]: Runtime Journal (/run/log/journal/a1727ec20198bc6caf436a6e13c4ff5e) is 8.0M, max 153.6M, 145.6M free.
Oct 11 02:42:17 localhost systemd[1]: systemd-journald.service: Deactivated successfully.
Oct 11 02:42:17 localhost systemd-journald[674]: Runtime Journal (/run/log/journal/a1727ec20198bc6caf436a6e13c4ff5e) is 8.0M, max 153.6M, 145.6M free.
Oct 11 02:42:17 localhost systemd-journald[674]: Received client request to flush runtime journal.

● systemd-logind.service - User Login Management
     Loaded: loaded (/usr/lib/systemd/system/systemd-logind.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-logind.service.d
             └─10-grub2-logind-service.conf
     Active: active (running) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
       Docs: man:sd-login(3)
             man:systemd-logind.service(8)
             man:logind.conf(5)
             man:org.freedesktop.login1(5)
   Main PID: 782 (systemd-logind)
     Status: "Processing requests..."
         IO: 4.0K read, 0B written
      Tasks: 1 (limit: 48573)
     Memory: 7.5M (peak: 8.0M)
        CPU: 4.696s
     CGroup: /system.slice/systemd-logind.service
             └─782 /usr/lib/systemd/systemd-logind

Oct 11 03:35:35 compute-0 systemd-logind[782]: Removed session 49.
Oct 11 03:35:40 compute-0 systemd-logind[782]: New session 50 of user zuul.
Oct 11 03:37:39 compute-0 systemd-logind[782]: Watching system buttons on /dev/input/event0 (Power Button)
Oct 11 03:37:39 compute-0 systemd-logind[782]: Watching system buttons on /dev/input/event1 (AT Translated Set 2 keyboard)
Oct 11 03:38:36 compute-0 systemd-logind[782]: New session 52 of user zuul.
Oct 11 03:38:37 compute-0 systemd-logind[782]: Session 52 logged out. Waiting for processes to exit.
Oct 11 03:38:37 compute-0 systemd-logind[782]: Removed session 52.
Oct 11 03:39:22 compute-0 systemd-logind[782]: Session 50 logged out. Waiting for processes to exit.
Oct 11 03:39:22 compute-0 systemd-logind[782]: Removed session 50.
Oct 11 04:09:47 compute-0 systemd-logind[782]: New session 53 of user zuul.

○ systemd-machine-id-commit.service - Commit a transient machine-id on disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-machine-id-commit.service; static)
Unit systemd-networkd-wait-online.service could not be found.
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
       Docs: man:systemd-machine-id-commit.service(8)

Oct 11 02:42:18 localhost systemd[1]: Commit a transient machine-id on disk was skipped because of an unmet condition check (ConditionPathIsMountPoint=/etc/machine-id).

● systemd-machined.service - Virtual Machine and Container Registration Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-machined.service; static)
     Active: active (running) since Sat 2025-10-11 03:34:58 UTC; 35min ago
       Docs: man:systemd-machined.service(8)
             man:org.freedesktop.machine1(5)
   Main PID: 215071 (systemd-machine)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48573)
     Memory: 1.5M (peak: 2.0M)
        CPU: 2.057s
     CGroup: /system.slice/systemd-machined.service
             └─215071 /usr/lib/systemd/systemd-machined

Oct 11 04:02:20 compute-0 systemd-machined[215071]: New machine qemu-26-instance-0000001a.
Oct 11 04:02:44 compute-0 systemd-machined[215071]: Machine qemu-26-instance-0000001a terminated.
Oct 11 04:03:01 compute-0 systemd-machined[215071]: New machine qemu-27-instance-0000001b.
Oct 11 04:03:42 compute-0 systemd-machined[215071]: Machine qemu-27-instance-0000001b terminated.
Oct 11 04:05:37 compute-0 systemd-machined[215071]: New machine qemu-28-instance-0000001c.
Oct 11 04:06:05 compute-0 systemd-machined[215071]: Machine qemu-28-instance-0000001c terminated.
Oct 11 04:06:56 compute-0 systemd-machined[215071]: New machine qemu-29-instance-0000001d.
Oct 11 04:07:22 compute-0 systemd-machined[215071]: Machine qemu-29-instance-0000001d terminated.
Oct 11 04:07:41 compute-0 systemd-machined[215071]: New machine qemu-30-instance-0000001e.
Oct 11 04:08:21 compute-0 systemd-machined[215071]: Machine qemu-30-instance-0000001e terminated.

● systemd-modules-load.service - Load Kernel Modules
     Loaded: loaded (/usr/lib/systemd/system/systemd-modules-load.service; static)
     Active: active (exited) since Sat 2025-10-11 03:37:30 UTC; 33min ago
       Docs: man:systemd-modules-load.service(8)
             man:modules-load.d(5)
   Main PID: 242199 (code=exited, status=0/SUCCESS)
        CPU: 15ms

Oct 11 03:37:30 compute-0 systemd[1]: Starting Load Kernel Modules...
Oct 11 03:37:30 compute-0 systemd[1]: Finished Load Kernel Modules.

● systemd-network-generator.service - Generate network units from Kernel command line
     Loaded: loaded (/usr/lib/systemd/system/systemd-network-generator.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:systemd-network-generator.service(8)
   Main PID: 675 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Oct 11 02:42:17 localhost systemd[1]: Finished Generate network units from Kernel command line.

○ systemd-pcrmachine.service - TPM2 PCR Machine ID Measurement
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrmachine.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:systemd-pcrmachine.service(8)

○ systemd-pcrphase-initrd.service - TPM2 PCR Barrier (initrd)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-initrd.service; static)
     Active: inactive (dead)
       Docs: man:systemd-pcrphase-initrd.service(8)

○ systemd-pcrphase-sysinit.service - TPM2 PCR Barrier (Initialization)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-sysinit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
       Docs: man:systemd-pcrphase-sysinit.service(8)

Oct 11 02:42:18 localhost systemd[1]: TPM2 PCR Barrier (Initialization) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pcrphase.service - TPM2 PCR Barrier (User)
Unit systemd-timesyncd.service could not be found.
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-10-11 02:42:20 UTC; 1h 28min ago
       Docs: man:systemd-pcrphase.service(8)

Oct 11 02:42:20 np0005480824.novalocal systemd[1]: TPM2 PCR Barrier (User) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pstore.service - Platform Persistent Storage Archival
     Loaded: loaded (/usr/lib/systemd/system/systemd-pstore.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:systemd-pstore(8)

Oct 11 02:42:17 localhost systemd[1]: Platform Persistent Storage Archival was skipped because of an unmet condition check (ConditionDirectoryNotEmpty=/sys/fs/pstore).

● systemd-random-seed.service - Load/Save OS Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-random-seed.service; static)
     Active: active (exited) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:systemd-random-seed.service(8)
             man:random(4)
   Main PID: 687 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Oct 11 02:42:17 localhost systemd[1]: Starting Load/Save OS Random Seed...
Oct 11 02:42:17 localhost systemd[1]: Finished Load/Save OS Random Seed.

● systemd-remount-fs.service - Remount Root and Kernel File Systems
     Loaded: loaded (/usr/lib/systemd/system/systemd-remount-fs.service; enabled-runtime; preset: disabled)
     Active: active (exited) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:systemd-remount-fs.service(8)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
   Main PID: 676 (code=exited, status=0/SUCCESS)
        CPU: 19ms

Oct 11 02:42:17 localhost systemd[1]: Finished Remount Root and Kernel File Systems.

○ systemd-repart.service - Repartition Root Disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-repart.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:systemd-repart.service(8)

○ systemd-rfkill.service - Load/Save RF Kill Switch Status
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-rfkill.socket
       Docs: man:systemd-rfkill.service(8)

● systemd-sysctl.service - Apply Kernel Variables
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysctl.service; static)
     Active: active (exited) since Sat 2025-10-11 03:10:55 UTC; 59min ago
       Docs: man:systemd-sysctl.service(8)
             man:sysctl.d(5)
   Main PID: 40931 (code=exited, status=0/SUCCESS)
        CPU: 24ms

Oct 11 03:10:55 compute-0 systemd[1]: Starting Apply Kernel Variables...
Oct 11 03:10:55 compute-0 systemd[1]: Finished Apply Kernel Variables.

○ systemd-sysext.service - Merge System Extension Images into /usr/ and /opt/
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysext.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd-sysext.service(8)

● systemd-sysusers.service - Create System Users
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysusers.service; static)
     Active: active (exited) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:sysusers.d(5)
             man:systemd-sysusers.service(8)
   Main PID: 688 (code=exited, status=0/SUCCESS)
        CPU: 31ms

Oct 11 02:42:17 localhost systemd[1]: Starting Create System Users...
Oct 11 02:42:17 localhost systemd[1]: Finished Create System Users.

○ systemd-tmpfiles-clean.service - Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.service; static)
     Active: inactive (dead) since Sat 2025-10-11 02:57:22 UTC; 1h 13min ago
TriggeredBy: ● systemd-tmpfiles-clean.timer
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
Unit systemd-tmpfiles.service could not be found.
   Main PID: 27458 (code=exited, status=0/SUCCESS)
        CPU: 78ms

Oct 11 02:57:22 compute-0 systemd[1]: Starting Cleanup of Temporary Directories...
Oct 11 02:57:22 compute-0 systemd[1]: systemd-tmpfiles-clean.service: Deactivated successfully.
Oct 11 02:57:22 compute-0 systemd[1]: Finished Cleanup of Temporary Directories.

● systemd-tmpfiles-setup-dev.service - Create Static Device Nodes in /dev
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup-dev.service; static)
     Active: active (exited) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 691 (code=exited, status=0/SUCCESS)
        CPU: 50ms

Oct 11 02:42:17 localhost systemd[1]: Starting Create Static Device Nodes in /dev...
Oct 11 02:42:18 localhost systemd[1]: Finished Create Static Device Nodes in /dev.

● systemd-tmpfiles-setup.service - Create Volatile Files and Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup.service; static)
     Active: active (exited) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 695 (code=exited, status=0/SUCCESS)
        CPU: 105ms

Oct 11 02:42:18 localhost systemd[1]: Starting Create Volatile Files and Directories...
Oct 11 02:42:18 localhost systemd[1]: Finished Create Volatile Files and Directories.

○ systemd-udev-settle.service - Wait for udev To Complete Device Initialization
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-settle.service; static)
     Active: inactive (dead)
       Docs: man:systemd-udev-settle.service(8)

● systemd-udev-trigger.service - Coldplug All udev Devices
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-trigger.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-udev-trigger.service.d
             └─systemd-udev-trigger-no-reload.conf
     Active: active (exited) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:udev(7)
             man:systemd-udevd.service(8)
   Main PID: 680 (code=exited, status=0/SUCCESS)
        CPU: 89ms

Oct 11 02:42:17 localhost systemd[1]: Finished Coldplug All udev Devices.

● systemd-udevd.service - Rule-based Manager for Device Events and Files
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd.service; static)
     Active: active (running) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
TriggeredBy: ● systemd-udevd-kernel.socket
             ● systemd-udevd-control.socket
       Docs: man:systemd-udevd.service(8)
             man:udev(7)
   Main PID: 729 (systemd-udevd)
     Status: "Processing with 32 children at max"
         IO: 126.5M read, 72.0M written
      Tasks: 1
     Memory: 50.3M (peak: 103.5M)
        CPU: 13.328s
     CGroup: /system.slice/systemd-udevd.service
             └─udev
               └─729 /usr/lib/systemd/systemd-udevd

Oct 11 04:05:37 compute-0 systemd-udevd[303751]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 04:06:56 compute-0 systemd-udevd[305961]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 04:07:41 compute-0 systemd-udevd[306461]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 04:07:41 compute-0 systemd-udevd[306466]: Network interface NamePolicy= disabled on kernel command line.
Oct 11 04:09:59 compute-0 lvm[309674]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Oct 11 04:09:59 compute-0 lvm[309674]: VG ceph_vg0 finished
Oct 11 04:09:59 compute-0 lvm[309701]: PV /dev/loop4 online, VG ceph_vg1 is complete.
Oct 11 04:09:59 compute-0 lvm[309701]: VG ceph_vg1 finished
Oct 11 04:09:59 compute-0 lvm[309723]: PV /dev/loop5 online, VG ceph_vg2 is complete.
Oct 11 04:09:59 compute-0 lvm[309723]: VG ceph_vg2 finished

● systemd-update-done.service - Update is Completed
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-done.service; static)
     Active: active (exited) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
       Docs: man:systemd-update-done.service(8)
   Main PID: 730 (code=exited, status=0/SUCCESS)
Unit tlp.service could not be found.
        CPU: 26ms

Oct 11 02:42:18 localhost systemd[1]: Starting Update is Completed...
Oct 11 02:42:18 localhost systemd[1]: Finished Update is Completed.

○ systemd-update-utmp-runlevel.service - Record Runlevel Change in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp-runlevel.service; static)
     Active: inactive (dead) since Sat 2025-10-11 02:42:22 UTC; 1h 28min ago
       Docs: man:systemd-update-utmp-runlevel.service(8)
             man:utmp(5)
   Main PID: 1013 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Oct 11 02:42:22 np0005480824.novalocal systemd[1]: Starting Record Runlevel Change in UTMP...
Oct 11 02:42:22 np0005480824.novalocal systemd[1]: systemd-update-utmp-runlevel.service: Deactivated successfully.
Oct 11 02:42:22 np0005480824.novalocal systemd[1]: Finished Record Runlevel Change in UTMP.

● systemd-update-utmp.service - Record System Boot/Shutdown in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp.service; static)
     Active: active (exited) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
       Docs: man:systemd-update-utmp.service(8)
             man:utmp(5)
   Main PID: 728 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Oct 11 02:42:18 localhost systemd[1]: Starting Record System Boot/Shutdown in UTMP...
Oct 11 02:42:18 localhost systemd[1]: Finished Record System Boot/Shutdown in UTMP.

● systemd-user-sessions.service - Permit User Sessions
     Loaded: loaded (/usr/lib/systemd/system/systemd-user-sessions.service; static)
     Active: active (exited) since Sat 2025-10-11 02:42:21 UTC; 1h 28min ago
       Docs: man:systemd-user-sessions.service(8)
   Main PID: 1006 (code=exited, status=0/SUCCESS)
        CPU: 11ms

Oct 11 02:42:21 np0005480824.novalocal systemd[1]: Starting Permit User Sessions...
Oct 11 02:42:21 np0005480824.novalocal systemd[1]: Finished Permit User Sessions.

○ systemd-vconsole-setup.service - Setup Virtual Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-vconsole-setup.service; static)
     Active: inactive (dead) since Sat 2025-10-11 02:42:16 UTC; 1h 28min ago
   Duration: 2.158s
       Docs: man:systemd-vconsole-setup.service(8)
             man:vconsole.conf(5)
   Main PID: 312 (code=exited, status=0/SUCCESS)
        CPU: 277ms

Oct 11 02:42:14 localhost systemd[1]: Finished Setup Virtual Console.
Oct 11 02:42:16 localhost systemd[1]: systemd-vconsole-setup.service: Deactivated successfully.
Oct 11 02:42:16 localhost systemd[1]: Stopped Setup Virtual Console.
Notice: journal has been rotated since unit was started, output may be incomplete.

● tuned.service - Dynamic System Tuning Daemon
     Loaded: loaded (/usr/lib/systemd/system/tuned.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-10-11 03:24:51 UTC; 45min ago
       Docs: man:tuned(8)
             man:tuned.conf(5)
             man:tuned-adm(8)
   Main PID: 112210 (tuned)
         IO: 0B read, 0B written
      Tasks: 4 (limit: 48573)
     Memory: 14.5M (peak: 16.8M)
        CPU: 1.175s
     CGroup: /system.slice/tuned.service
             └─112210 /usr/bin/python3 -Es /usr/sbin/tuned -l -P

Oct 11 03:24:51 compute-0 systemd[1]: Starting Dynamic System Tuning Daemon...
Oct 11 03:24:51 compute-0 systemd[1]: Started Dynamic System Tuning Daemon.

○ unbound-anchor.service - update of the root trust anchor for DNSSEC validation in unbound
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.service; static)
     Active: inactive (dead)
TriggeredBy: ● unbound-anchor.timer
       Docs: man:unbound-anchor(8)

● user-runtime-dir@1000.service - User Runtime Directory /run/user/1000
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Sat 2025-10-11 02:42:35 UTC; 1h 28min ago
       Docs: man:user@.service(5)
   Main PID: 1056 (code=exited, status=0/SUCCESS)
        CPU: 26ms

Oct 11 02:42:35 np0005480824.novalocal systemd[1]: Starting User Runtime Directory /run/user/1000...
Oct 11 02:42:35 np0005480824.novalocal systemd[1]: Finished User Runtime Directory /run/user/1000.

● user-runtime-dir@42477.service - User Runtime Directory /run/user/42477
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Sat 2025-10-11 03:19:33 UTC; 51min ago
       Docs: man:user@.service(5)
   Main PID: 75889 (code=exited, status=0/SUCCESS)
        CPU: 29ms

Oct 11 03:19:33 compute-0 systemd[1]: Starting User Runtime Directory /run/user/42477...
Oct 11 03:19:33 compute-0 systemd[1]: Finished User Runtime Directory /run/user/42477.

● user@1000.service - User Manager for UID 1000
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Sat 2025-10-11 02:42:36 UTC; 1h 28min ago
       Docs: man:user@.service(5)
   Main PID: 1057 (systemd)
     Status: "Ready."
         IO: 648.0K read, 8.0K written
      Tasks: 5
     Memory: 7.4M (peak: 9.8M)
        CPU: 5.102s
     CGroup: /user.slice/user-1000.slice/user@1000.service
             ├─app.slice
             │ └─dbus-broker.service
             │   ├─9030 /usr/bin/dbus-broker-launch --scope user
             │   └─9042 dbus-broker --log 4 --controller 9 --machine-id a1727ec20198bc6caf436a6e13c4ff5e --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             ├─init.scope
             │ ├─1057 /usr/lib/systemd/systemd --user
             │ └─1059 "(sd-pam)"
             └─user.slice
               └─podman-pause-31202ca3.scope
                 └─8928 catatonit -P

Oct 11 02:52:20 np0005480824.novalocal dbus-broker-launch[9030]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +31: Eavesdropping is deprecated and ignored
Oct 11 02:52:20 np0005480824.novalocal dbus-broker-launch[9030]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +33: Eavesdropping is deprecated and ignored
Oct 11 02:52:20 np0005480824.novalocal systemd[1057]: Started D-Bus User Message Bus.
Oct 11 02:52:20 np0005480824.novalocal dbus-broker-lau[9030]: Ready
Oct 11 02:52:20 np0005480824.novalocal systemd[1057]: selinux: avc:  op=load_policy lsm=selinux seqno=6 res=1
Oct 11 02:52:20 np0005480824.novalocal systemd[1057]: Created slice Slice /user.
Oct 11 02:52:20 np0005480824.novalocal systemd[1057]: podman-8910.scope: unit configures an IP firewall, but not running as root.
Oct 11 02:52:20 np0005480824.novalocal systemd[1057]: (This warning is only shown for the first unit using IP firewalling.)
Oct 11 02:52:20 np0005480824.novalocal systemd[1057]: Started podman-8910.scope.
Oct 11 02:52:20 np0005480824.novalocal systemd[1057]: Started podman-pause-31202ca3.scope.

● user@42477.service - User Manager for UID 42477
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Sat 2025-10-11 03:19:33 UTC; 51min ago
       Docs: man:user@.service(5)
   Main PID: 75890 (systemd)
     Status: "Ready."
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 7.3M (peak: 10.7M)
        CPU: 3.705s
     CGroup: /user.slice/user-42477.slice/user@42477.service
             └─init.scope
               ├─75890 /usr/lib/systemd/systemd --user
               └─75892 "(sd-pam)"

Oct 11 03:19:33 compute-0 systemd[75890]: Finished Create User's Volatile Files and Directories.
Oct 11 03:19:33 compute-0 systemd[75890]: Reached target Basic System.
Oct 11 03:19:33 compute-0 systemd[75890]: Reached target Main User Target.
Oct 11 03:19:33 compute-0 systemd[75890]: Startup finished in 179ms.
Oct 11 03:19:33 compute-0 systemd[1]: Started User Manager for UID 42477.
Oct 11 03:21:34 compute-0 systemd[75890]: Starting Mark boot as successful...
Oct 11 03:21:34 compute-0 systemd[75890]: Finished Mark boot as successful.
Oct 11 03:24:54 compute-0 systemd[75890]: Created slice User Background Tasks Slice.
Oct 11 03:24:54 compute-0 systemd[75890]: Starting Cleanup of User's Temporary Files and Directories...
Oct 11 03:24:54 compute-0 systemd[75890]: Finished Cleanup of User's Temporary Files and Directories.

○ virtinterfaced.service - libvirt interface daemon
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtinterfaced.socket
             ○ virtinterfaced-ro.socket
             ○ virtinterfaced-admin.socket
       Docs: man:virtinterfaced(8)
             https://libvirt.org/

○ virtlockd.service - libvirt locking daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ● virtlockd.socket
             ○ virtlockd-admin.socket
       Docs: man:virtlockd(8)
             https://libvirt.org/

● virtlogd.service - libvirt logging daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-10-11 03:34:54 UTC; 35min ago
TriggeredBy: ● virtlogd-admin.socket
             ● virtlogd.socket
       Docs: man:virtlogd(8)
             https://libvirt.org/
   Main PID: 214444 (virtlogd)
         IO: 1.0M read, 2.5M written
      Tasks: 1 (limit: 48573)
     Memory: 4.0M (peak: 4.5M)
        CPU: 42.518s
     CGroup: /system.slice/virtlogd.service
             └─214444 /usr/sbin/virtlogd

Oct 11 03:34:54 compute-0 systemd[1]: Starting libvirt logging daemon...
Oct 11 03:34:54 compute-0 systemd[1]: Started libvirt logging daemon.

○ virtnetworkd.service - libvirt network daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnetworkd.socket
             ○ virtnetworkd-admin.socket
             ○ virtnetworkd-ro.socket
       Docs: man:virtnetworkd(8)
             https://libvirt.org/

● virtnodedevd.service - libvirt nodedev daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-10-11 03:39:24 UTC; 31min ago
TriggeredBy: ● virtnodedevd-admin.socket
             ● virtnodedevd.socket
             ● virtnodedevd-ro.socket
       Docs: man:virtnodedevd(8)
             https://libvirt.org/
   Main PID: 260415 (virtnodedevd)
         IO: 3.4M read, 0B written
      Tasks: 19 (limit: 48573)
     Memory: 9.0M (peak: 10.4M)
        CPU: 3.410s
     CGroup: /system.slice/virtnodedevd.service
             └─260415 /usr/sbin/virtnodedevd --timeout 120

Oct 11 03:39:24 compute-0 systemd[1]: Starting libvirt nodedev daemon...
Oct 11 03:39:24 compute-0 systemd[1]: Started libvirt nodedev daemon.

○ virtnwfilterd.service - libvirt nwfilter daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnwfilterd-ro.socket
             ○ virtnwfilterd-admin.socket
             ○ virtnwfilterd.socket
       Docs: man:virtnwfilterd(8)
             https://libvirt.org/

○ virtproxyd.service - libvirt proxy daemon
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.service; enabled; preset: disabled)
     Active: inactive (dead) since Sat 2025-10-11 03:36:57 UTC; 33min ago
   Duration: 2min 22ms
TriggeredBy: ● virtproxyd-tls.socket
             ● virtproxyd.socket
             ● virtproxyd-ro.socket
             ● virtproxyd-admin.socket
       Docs: man:virtproxyd(8)
             https://libvirt.org/
   Main PID: 214862 (code=exited, status=0/SUCCESS)
        CPU: 59ms

Oct 11 03:34:57 compute-0 systemd[1]: Starting libvirt proxy daemon...
Oct 11 03:34:57 compute-0 systemd[1]: Started libvirt proxy daemon.
Oct 11 03:36:57 compute-0 systemd[1]: virtproxyd.service: Deactivated successfully.

● virtqemud.service - libvirt QEMU daemon
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.service; enabled; preset: enabled)
     Active: active (running) since Sat 2025-10-11 03:39:18 UTC; 31min ago
Unit ypbind.service could not be found.
Unit yppasswdd.service could not be found.
Unit ypserv.service could not be found.
Unit ypxfrd.service could not be found.
TriggeredBy: ● virtqemud-ro.socket
             ● virtqemud-admin.socket
             ● virtqemud.socket
       Docs: man:virtqemud(8)
             https://libvirt.org/
   Main PID: 259861 (virtqemud)
         IO: 38.8M read, 1.3M written
      Tasks: 19 (limit: 32768)
     Memory: 61.0M (peak: 80.2M)
        CPU: 12.425s
     CGroup: /system.slice/virtqemud.service
             └─259861 /usr/sbin/virtqemud --timeout 120

Oct 11 03:39:18 compute-0 systemd[1]: Starting libvirt QEMU daemon...
Oct 11 03:39:18 compute-0 systemd[1]: Started libvirt QEMU daemon.
Oct 11 03:39:20 compute-0 virtqemud[259861]: libvirt version: 10.10.0, package: 15.el9 (builder@centos.org, 2025-08-18-13:22:20, )
Oct 11 03:39:20 compute-0 virtqemud[259861]: hostname: compute-0
Oct 11 03:39:20 compute-0 virtqemud[259861]: End of file while reading data: Input/output error
Oct 11 04:09:58 compute-0 virtqemud[259861]: Failed to connect socket to '/var/run/libvirt/virtnetworkd-sock-ro': No such file or directory
Oct 11 04:09:58 compute-0 virtqemud[259861]: Failed to connect socket to '/var/run/libvirt/virtnwfilterd-sock-ro': No such file or directory
Oct 11 04:09:58 compute-0 virtqemud[259861]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory
Oct 11 04:10:37 compute-0 virtqemud[259861]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory

● virtsecretd.service - libvirt secret daemon
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.service; enabled; preset: disabled)
     Active: active (running) since Sat 2025-10-11 03:46:03 UTC; 24min ago
TriggeredBy: ● virtsecretd-ro.socket
             ● virtsecretd.socket
             ● virtsecretd-admin.socket
       Docs: man:virtsecretd(8)
             https://libvirt.org/
   Main PID: 267673 (virtsecretd)
         IO: 792.0K read, 159.0K written
      Tasks: 16 (limit: 48573)
     Memory: 4.7M (peak: 5.7M)
        CPU: 523ms
     CGroup: /system.slice/virtsecretd.service
             └─267673 /usr/sbin/virtsecretd --timeout 120

Oct 11 03:46:03 compute-0 systemd[1]: Starting libvirt secret daemon...
Oct 11 03:46:03 compute-0 systemd[1]: Started libvirt secret daemon.

○ virtstoraged.service - libvirt storage daemon
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtstoraged-admin.socket
             ○ virtstoraged.socket
             ○ virtstoraged-ro.socket
       Docs: man:virtstoraged(8)
             https://libvirt.org/

● -.slice - Root Slice
     Loaded: loaded
     Active: active since Sat 2025-10-11 02:42:14 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:14 UTC; 1h 28min ago
       Docs: man:systemd.special(7)
      Tasks: 1408
     Memory: 3.4G
        CPU: 58min 6.510s
     CGroup: /
             ├─init.scope
             │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
             ├─machine.slice
             │ ├─libpod-26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1.scope
             │ │ └─container
             │ │   ├─260091 dumb-init --single-child -- kolla_start
             │ │   ├─260093 /usr/bin/python3 /usr/bin/nova-compute
             │ │   ├─267479 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmpqv15d5ht/privsep.sock
             │ │   ├─268186 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmplcy3_gw7/privsep.sock
             │ │   └─268305 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmpb19gkq7f/privsep.sock
             │ ├─libpod-65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2.scope
             │ │ └─container
             │ │   ├─152669 dumb-init --single-child -- kolla_start
             │ │   └─152684 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
             │ ├─libpod-8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8.scope
             │ │ └─container
             │ │   ├─241114 dumb-init --single-child -- kolla_start
             │ │   └─241117 /usr/sbin/multipathd -d
             │ ├─libpod-dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab.scope
             │ │ └─container
             │ │   ├─162242 dumb-init --single-child -- kolla_start
             │ │   ├─162245 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │ │   ├─162583 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │ │   ├─162666 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpf884qsmb/privsep.sock
             │ │   ├─267859 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmph48b70lz/privsep.sock
             │ │   └─268023 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmp8g8ifqko/privsep.sock
             │ └─libpod-f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d.scope
             │   └─container
             │     ├─230879 dumb-init --single-child -- kolla_start
             │     └─230882 /usr/sbin/iscsid -f
             ├─system.slice
             │ ├─NetworkManager.service
             │ │ └─44969 /usr/sbin/NetworkManager --no-daemon
             │ ├─auditd.service
             │ │ ├─700 /sbin/auditd
             │ │ └─702 /usr/sbin/sedispatch
             │ ├─chronyd.service
             │ │ └─54376 /usr/sbin/chronyd -F 2
             │ ├─crond.service
             │ │ ├─ 1007 /usr/sbin/crond -n
             │ │ └─27476 /usr/sbin/anacron -s
             │ ├─dbus-broker.service
             │ │ ├─738 /usr/bin/dbus-broker-launch --scope system --audit
             │ │ └─770 dbus-broker --log 4 --controller 9 --machine-id a1727ec20198bc6caf436a6e13c4ff5e --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             │ ├─edpm_iscsid.service
             │ │ └─230877 /usr/bin/conmon --api-version 1 -c f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d -u f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d/userdata -p /run/containers/storage/overlay-containers/f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d/userdata/pidfile -n iscsid --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d/userdata/oci-log --conmon-pidfile /run/iscsid.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only 
--exit-command-arg f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d
             │ ├─edpm_multipathd.service
             │ │ └─241112 /usr/bin/conmon --api-version 1 -c 8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8 -u 8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8/userdata -p /run/containers/storage/overlay-containers/8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8/userdata/pidfile -n multipathd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8/userdata/oci-log --conmon-pidfile /run/multipathd.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8
             │ ├─edpm_nova_compute.service
             │ │ └─260089 /usr/bin/conmon --api-version 1 -c 26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1 -u 26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1/userdata -p /run/containers/storage/overlay-containers/26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1
             │ ├─edpm_ovn_controller.service
             │ │ └─152667 /usr/bin/conmon --api-version 1 -c 65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2 -u 65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2/userdata -p /run/containers/storage/overlay-containers/65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2
             │ ├─edpm_ovn_metadata_agent.service
             │ │ └─162240 /usr/bin/conmon --api-version 1 -c dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab -u dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab/userdata -p /run/containers/storage/overlay-containers/dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab
             │ ├─gssproxy.service
             │ │ └─874 /usr/sbin/gssproxy -D
             │ ├─irqbalance.service
             │ │ └─777 /usr/sbin/irqbalance
             │ ├─ovs-vswitchd.service
             │ │ └─43265 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             │ ├─ovsdb-server.service
             │ │ └─43180 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             │ ├─polkit.service
             │ │ └─6205 /usr/lib/polkit-1/polkitd --no-debug
             │ ├─rpcbind.service
             │ │ └─698 /usr/bin/rpcbind -w -f
             │ ├─rsyslog.service
             │ │ └─1004 /usr/sbin/rsyslogd -n
             │ ├─sshd.service
             │ │ └─189518 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             │ ├─system-ceph\x2d92cfe4d4\x2d4917\x2d5be1\x2d9d00\x2d73758793a62b.slice
             │ │ ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@crash.compute-0.service
             │ │ │ ├─libpod-payload-90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476
             │ │ │ │ ├─82392 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ │ └─82394 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ └─runtime
             │ │ │   └─82390 /usr/bin/conmon --api-version 1 -c 90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476 -u 90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476/userdata -p /run/containers/storage/overlay-containers/90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476
             │ │ ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mds.cephfs.compute-0.uxaxgb.service
             │ │ │ ├─libpod-payload-53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6
             │ │ │ │ ├─101046 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.uxaxgb -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─101067 /usr/bin/ceph-mds -n mds.cephfs.compute-0.uxaxgb -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─101044 /usr/bin/conmon --api-version 1 -c 53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6 -u 53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6/userdata -p /run/containers/storage/overlay-containers/53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-mds-cephfs-compute-0-uxaxgb --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mds.cephfs.compute-0.uxaxgb.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6
             │ │ ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mgr.compute-0.pdyrua.service
             │ │ │ ├─libpod-payload-5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba
             │ │ │ │ ├─74615 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.pdyrua -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─74617 /usr/bin/ceph-mgr -n mgr.compute-0.pdyrua -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─74613 /usr/bin/conmon --api-version 1 -c 5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba -u 5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba/userdata -p /run/containers/storage/overlay-containers/5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-mgr-compute-0-pdyrua --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mgr.compute-0.pdyrua.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba
             │ │ ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mon.compute-0.service
             │ │ │ ├─libpod-payload-a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05
             │ │ │ │ ├─74324 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ │ └─74326 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─74322 /usr/bin/conmon --api-version 1 -c a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05 -u a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05/userdata -p /run/containers/storage/overlay-containers/a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05
             │ │ ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.0.service
             │ │ │ ├─libpod-payload-47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205
             │ │ │ │ ├─88323 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─88325 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─88321 /usr/bin/conmon --api-version 1 -c 47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205 -u 47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205/userdata -p /run/containers/storage/overlay-containers/47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205
             │ │ ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.1.service
             │ │ │ ├─libpod-payload-159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2
             │ │ │ │ ├─89399 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─89401 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─89397 /usr/bin/conmon --api-version 1 -c 159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2 -u 159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2/userdata -p /run/containers/storage/overlay-containers/159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2
             │ │ ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.2.service
             │ │ │ ├─libpod-payload-1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784
             │ │ │ │ ├─90441 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─90443 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─90439 /usr/bin/conmon --api-version 1 -c 1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784 -u 1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784/userdata -p /run/containers/storage/overlay-containers/1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784
             │ │ └─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@rgw.rgw.compute-0.bqunnq.service
             │ │   ├─libpod-payload-25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60
             │ │   │ ├─100587 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.bqunnq -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │   │ └─100589 /usr/bin/radosgw -n client.rgw.rgw.compute-0.bqunnq -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │   └─runtime
             │ │     └─100585 /usr/bin/conmon --api-version 1 -c 25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60 -u 25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60/userdata -p /run/containers/storage/overlay-containers/25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-rgw-rgw-compute-0-bqunnq --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@rgw.rgw.compute-0.bqunnq.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60
             │ ├─system-getty.slice
             │ │ └─getty@tty1.service
             │ │   └─1008 /sbin/agetty -o "-p -- \\u" --noclear - linux
             │ ├─system-serial\x2dgetty.slice
             │ │ └─serial-getty@ttyS0.service
             │ │   └─1009 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             │ ├─systemd-hostnamed.service
             │ │ └─313130 /usr/lib/systemd/systemd-hostnamed
             │ ├─systemd-journald.service
             │ │ └─674 /usr/lib/systemd/systemd-journald
             │ ├─systemd-logind.service
             │ │ └─782 /usr/lib/systemd/systemd-logind
             │ ├─systemd-machined.service
             │ │ └─215071 /usr/lib/systemd/systemd-machined
             │ ├─systemd-udevd.service
             │ │ └─udev
             │ │   └─729 /usr/lib/systemd/systemd-udevd
             │ ├─tuned.service
             │ │ └─112210 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             │ ├─virtlogd.service
             │ │ └─214444 /usr/sbin/virtlogd
             │ ├─virtnodedevd.service
             │ │ └─260415 /usr/sbin/virtnodedevd --timeout 120
             │ ├─virtqemud.service
             │ │ └─259861 /usr/sbin/virtqemud --timeout 120
             │ └─virtsecretd.service
             │   └─267673 /usr/sbin/virtsecretd --timeout 120
             └─user.slice
               ├─user-1000.slice
               │ ├─session-1.scope
               │ │ └─1267 /usr/bin/python3
               │ ├─session-53.scope
               │ │ ├─308926 "sshd-session: zuul [priv]"
               │ │ ├─308929 "sshd-session: zuul@notty"
               │ │ ├─308930 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt"
               │ │ ├─308954 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
               │ │ ├─316407 timeout 300s systemctl status --all
               │ │ └─316408 systemctl status --all
               │ └─user@1000.service
               │   ├─app.slice
               │   │ └─dbus-broker.service
               │   │   ├─9030 /usr/bin/dbus-broker-launch --scope user
               │   │   └─9042 dbus-broker --log 4 --controller 9 --machine-id a1727ec20198bc6caf436a6e13c4ff5e --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               │   ├─init.scope
               │   │ ├─1057 /usr/lib/systemd/systemd --user
               │   │ └─1059 "(sd-pam)"
               │   └─user.slice
               │     └─podman-pause-31202ca3.scope
               │       └─8928 catatonit -P
               └─user-42477.slice
                 ├─session-21.scope
                 │ ├─75886 "sshd-session: ceph-admin [priv]"
                 │ └─75908 "sshd-session: ceph-admin"
                 ├─session-23.scope
                 │ ├─75893 "sshd-session: ceph-admin [priv]"
                 │ └─75909 "sshd-session: ceph-admin@notty"
                 ├─session-24.scope
                 │ ├─75960 "sshd-session: ceph-admin [priv]"
                 │ └─75963 "sshd-session: ceph-admin@notty"
                 ├─session-25.scope
                 │ ├─76014 "sshd-session: ceph-admin [priv]"
                 │ └─76017 "sshd-session: ceph-admin@notty"
                 ├─session-26.scope
                 │ ├─76068 "sshd-session: ceph-admin [priv]"
                 │ └─76071 "sshd-session: ceph-admin@notty"
                 ├─session-27.scope
                 │ ├─76122 "sshd-session: ceph-admin [priv]"
                 │ └─76125 "sshd-session: ceph-admin@notty"
                 ├─session-28.scope
                 │ ├─76176 "sshd-session: ceph-admin [priv]"
                 │ └─76179 "sshd-session: ceph-admin@notty"
                 ├─session-29.scope
                 │ ├─76230 "sshd-session: ceph-admin [priv]"
                 │ └─76233 "sshd-session: ceph-admin@notty"
                 ├─session-30.scope
                 │ ├─76284 "sshd-session: ceph-admin [priv]"
                 │ └─76287 "sshd-session: ceph-admin@notty"
                 ├─session-31.scope
                 │ ├─76338 "sshd-session: ceph-admin [priv]"
                 │ └─76341 "sshd-session: ceph-admin@notty"
                 ├─session-32.scope
                 │ ├─76365 "sshd-session: ceph-admin [priv]"
                 │ └─76368 "sshd-session: ceph-admin@notty"
                 ├─session-33.scope
                 │ ├─76419 "sshd-session: ceph-admin [priv]"
                 │ └─76422 "sshd-session: ceph-admin@notty"
                 └─user@42477.service
                   └─init.scope
                     ├─75890 /usr/lib/systemd/systemd --user
                     └─75892 "(sd-pam)"

Oct 11 04:10:13 compute-0 systemd[1]: var-lib-containers-storage-overlay-56d3d552db340baac6a394077bbacfc91f51a67986c050196cbb8d418b04db91-merged.mount: Deactivated successfully.
Oct 11 04:10:13 compute-0 systemd[1]: libpod-conmon-c7a69b0f84f7e1cb8da3482d9ea77ad193006b4ccbc5f02682485a30ca6c3590.scope: Deactivated successfully.
Oct 11 04:10:14 compute-0 systemd[1]: Started libpod-conmon-62934323c7dacd31d940284e1901d7f46aeff7a5d1c2a6cf475415517bf1f78a.scope.
Oct 11 04:10:14 compute-0 systemd[1]: Started libcrun container.
Oct 11 04:10:15 compute-0 systemd[1]: libpod-62934323c7dacd31d940284e1901d7f46aeff7a5d1c2a6cf475415517bf1f78a.scope: Deactivated successfully.
Oct 11 04:10:15 compute-0 systemd[1]: libpod-62934323c7dacd31d940284e1901d7f46aeff7a5d1c2a6cf475415517bf1f78a.scope: Consumed 1.046s CPU time.
Oct 11 04:10:15 compute-0 systemd[1]: var-lib-containers-storage-overlay-e43d32d0f8addaec55941d7134f63f063e3d3e2e76f13a81d74e89dd50ddf381-merged.mount: Deactivated successfully.
Oct 11 04:10:15 compute-0 systemd[1]: libpod-conmon-62934323c7dacd31d940284e1901d7f46aeff7a5d1c2a6cf475415517bf1f78a.scope: Deactivated successfully.
Oct 11 04:10:16 compute-0 systemd[1]: Starting Hostname Service...
Oct 11 04:10:16 compute-0 systemd[1]: Started Hostname Service.

● machine.slice - Virtual Machine and Container Slice
     Loaded: loaded (/usr/lib/systemd/system/machine.slice; static)
     Active: active since Sat 2025-10-11 03:18:29 UTC; 52min ago
      Until: Sat 2025-10-11 03:18:29 UTC; 52min ago
       Docs: man:systemd.special(7)
         IO: 406.5M read, 87.6M written
      Tasks: 56
     Memory: 1.0G (peak: 1.6G)
        CPU: 14min 27.933s
     CGroup: /machine.slice
             ├─libpod-26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1.scope
             │ └─container
             │   ├─260091 dumb-init --single-child -- kolla_start
             │   ├─260093 /usr/bin/python3 /usr/bin/nova-compute
             │   ├─267479 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmpqv15d5ht/privsep.sock
             │   ├─268186 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmplcy3_gw7/privsep.sock
             │   └─268305 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmpb19gkq7f/privsep.sock
             ├─libpod-65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2.scope
             │ └─container
             │   ├─152669 dumb-init --single-child -- kolla_start
             │   └─152684 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
             ├─libpod-8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8.scope
             │ └─container
             │   ├─241114 dumb-init --single-child -- kolla_start
             │   └─241117 /usr/sbin/multipathd -d
             ├─libpod-dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab.scope
             │ └─container
             │   ├─162242 dumb-init --single-child -- kolla_start
             │   ├─162245 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │   ├─162583 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │   ├─162666 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpf884qsmb/privsep.sock
             │   ├─267859 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmph48b70lz/privsep.sock
             │   └─268023 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmp8g8ifqko/privsep.sock
             └─libpod-f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d.scope
               └─container
                 ├─230879 dumb-init --single-child -- kolla_start
                 └─230882 /usr/sbin/iscsid -f

Oct 11 04:10:15 compute-0 loving_cannon[312751]:     },
Oct 11 04:10:15 compute-0 loving_cannon[312751]:     "e86945e8-6909-4584-9098-cee0dfe9add4": {
Oct 11 04:10:15 compute-0 loving_cannon[312751]:         "ceph_fsid": "92cfe4d4-4917-5be1-9d00-73758793a62b",
Oct 11 04:10:15 compute-0 loving_cannon[312751]:         "device": "/dev/mapper/ceph_vg2-ceph_lv2",
Oct 11 04:10:15 compute-0 loving_cannon[312751]:         "osd_id": 2,
Oct 11 04:10:15 compute-0 loving_cannon[312751]:         "osd_uuid": "e86945e8-6909-4584-9098-cee0dfe9add4",
Oct 11 04:10:15 compute-0 loving_cannon[312751]:         "type": "bluestore"
Oct 11 04:10:15 compute-0 loving_cannon[312751]:     }
Oct 11 04:10:15 compute-0 loving_cannon[312751]: }
Oct 11 04:10:15 compute-0 conmon[312751]: conmon 62934323c7dacd31d940 <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-62934323c7dacd31d940284e1901d7f46aeff7a5d1c2a6cf475415517bf1f78a.scope/container/memory.events

● system-ceph\x2d92cfe4d4\x2d4917\x2d5be1\x2d9d00\x2d73758793a62b.slice - Slice /system/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b
     Loaded: loaded
     Active: active since Sat 2025-10-11 03:18:35 UTC; 52min ago
      Until: Sat 2025-10-11 03:18:35 UTC; 52min ago
         IO: 1.5G read, 25.5G written
      Tasks: 992
     Memory: 3.5G (peak: 4.5G)
        CPU: 5min 14.813s
     CGroup: /system.slice/system-ceph\x2d92cfe4d4\x2d4917\x2d5be1\x2d9d00\x2d73758793a62b.slice
             ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@crash.compute-0.service
             │ ├─libpod-payload-90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476
             │ │ ├─82392 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ └─82394 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ └─runtime
             │   └─82390 /usr/bin/conmon --api-version 1 -c 90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476 -u 90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476/userdata -p /run/containers/storage/overlay-containers/90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476
             ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mds.cephfs.compute-0.uxaxgb.service
             │ ├─libpod-payload-53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6
             │ │ ├─101046 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.uxaxgb -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─101067 /usr/bin/ceph-mds -n mds.cephfs.compute-0.uxaxgb -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─101044 /usr/bin/conmon --api-version 1 -c 53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6 -u 53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6/userdata -p /run/containers/storage/overlay-containers/53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-mds-cephfs-compute-0-uxaxgb --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mds.cephfs.compute-0.uxaxgb.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6
             ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mgr.compute-0.pdyrua.service
             │ ├─libpod-payload-5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba
             │ │ ├─74615 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.pdyrua -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─74617 /usr/bin/ceph-mgr -n mgr.compute-0.pdyrua -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─74613 /usr/bin/conmon --api-version 1 -c 5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba -u 5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba/userdata -p /run/containers/storage/overlay-containers/5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-mgr-compute-0-pdyrua --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mgr.compute-0.pdyrua.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba
             ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mon.compute-0.service
             │ ├─libpod-payload-a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05
             │ │ ├─74324 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─74326 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─runtime
             │   └─74322 /usr/bin/conmon --api-version 1 -c a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05 -u a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05/userdata -p /run/containers/storage/overlay-containers/a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05
             ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.0.service
             │ ├─libpod-payload-47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205
             │ │ ├─88323 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─88325 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─88321 /usr/bin/conmon --api-version 1 -c 47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205 -u 47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205/userdata -p /run/containers/storage/overlay-containers/47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205
             ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.1.service
             │ ├─libpod-payload-159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2
             │ │ ├─89399 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─89401 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─89397 /usr/bin/conmon --api-version 1 -c 159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2 -u 159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2/userdata -p /run/containers/storage/overlay-containers/159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2
             ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.2.service
             │ ├─libpod-payload-1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784
             │ │ ├─90441 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─90443 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─90439 /usr/bin/conmon --api-version 1 -c 1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784 -u 1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784/userdata -p /run/containers/storage/overlay-containers/1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784
             └─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@rgw.rgw.compute-0.bqunnq.service
               ├─libpod-payload-25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60
               │ ├─100587 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.bqunnq -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
               │ └─100589 /usr/bin/radosgw -n client.rgw.rgw.compute-0.bqunnq -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
               └─runtime
                 └─100585 /usr/bin/conmon --api-version 1 -c 25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60 -u 25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60/userdata -p /run/containers/storage/overlay-containers/25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-rgw-rgw-compute-0-bqunnq --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@rgw.rgw.compute-0.bqunnq.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60

Oct 11 04:10:38 compute-0 ceph-mgr[74617]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 04:10:38 compute-0 ceph-mgr[74617]: [pg_autoscaler INFO root] Pool 'cephfs.cephfs.data' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 04:10:38 compute-0 ceph-mgr[74617]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 04:10:38 compute-0 ceph-mgr[74617]: [pg_autoscaler INFO root] Pool '.rgw.root' root_id -1 using 2.5436283128215145e-07 of space, bias 1.0, pg target 7.630884938464544e-05 quantized to 32 (current 32)
Oct 11 04:10:38 compute-0 ceph-mgr[74617]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 04:10:38 compute-0 ceph-mgr[74617]: [pg_autoscaler INFO root] Pool 'default.rgw.log' root_id -1 using 2.1620840658982875e-06 of space, bias 1.0, pg target 0.0006486252197694863 quantized to 32 (current 32)
Oct 11 04:10:38 compute-0 ceph-mgr[74617]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 04:10:38 compute-0 ceph-mgr[74617]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Oct 11 04:10:38 compute-0 ceph-mgr[74617]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Oct 11 04:10:38 compute-0 ceph-mgr[74617]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)

● system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice - Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged
     Loaded: loaded
     Active: active since Sat 2025-10-11 03:34:56 UTC; 35min ago
      Until: Sat 2025-10-11 03:34:56 UTC; 35min ago
         IO: 4.0K read, 0B written
      Tasks: 0
     Memory: 4.0K (peak: 57.8M)
        CPU: 979ms
     CGroup: /system.slice/system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice

Oct 11 03:34:56 compute-0 systemd[1]: Created slice Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged.

● system-getty.slice - Slice /system/getty
     Loaded: loaded
     Active: active since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 240.0K (peak: 712.0K)
        CPU: 14ms
     CGroup: /system.slice/system-getty.slice
             └─getty@tty1.service
               └─1008 /sbin/agetty -o "-p -- \\u" --noclear - linux

● system-modprobe.slice - Slice /system/modprobe
     Loaded: loaded
     Active: active since Sat 2025-10-11 02:42:15 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:15 UTC; 1h 28min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 48.0K (peak: 11.5M)
        CPU: 159ms
     CGroup: /system.slice/system-modprobe.slice

Oct 11 02:42:15 localhost systemd[1]: Created slice Slice /system/modprobe.

● system-serial\x2dgetty.slice - Slice /system/serial-getty
     Loaded: loaded
     Active: active since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 320.0K (peak: 564.0K)
        CPU: 12ms
     CGroup: /system.slice/system-serial\x2dgetty.slice
             └─serial-getty@ttyS0.service
               └─1009 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

● system-sshd\x2dkeygen.slice - Slice /system/sshd-keygen
     Loaded: loaded
     Active: active since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 0B (peak: 0B)
        CPU: 0
     CGroup: /system.slice/system-sshd\x2dkeygen.slice

● system-systemd\x2dcoredump.slice - Slice /system/systemd-coredump
     Loaded: loaded
     Active: active since Sat 2025-10-11 03:46:34 UTC; 24min ago
      Until: Sat 2025-10-11 03:46:34 UTC; 24min ago
         IO: 4.1M read, 1.0M written
      Tasks: 0
     Memory: 4.1M (peak: 291.4M)
        CPU: 1.050s
     CGroup: /system.slice/system-systemd\x2dcoredump.slice

Oct 11 03:46:34 compute-0 systemd[1]: Created slice Slice /system/systemd-coredump.
Oct 11 03:46:35 compute-0 systemd-coredump[268327]: Process 268307 (qemu-img) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 767:
                                                    #0  0x00007f065dd3003c __pthread_kill_implementation (libc.so.6 + 0x8d03c)
                                                    #1  0x00007f065dce2b86 raise (libc.so.6 + 0x3fb86)
                                                    #2  0x00007f065dccc873 abort (libc.so.6 + 0x29873)
                                                    #3  0x00005636f4af556f ___interceptor_pthread_create (qemu-img + 0x4e56f)
                                                    #4  0x00007f065af06ff4 _ZN6Thread10try_createEm (libceph-common.so.2 + 0x258ff4)
                                                    #5  0x00007f065af096ae _ZN6Thread6createEPKcm (libceph-common.so.2 + 0x25b6ae)
                                                    #6  0x00007f065be1026b _ZNSt8_Rb_treeISt4pairINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESt10type_indexES0_IKS8_N4ceph12immobile_anyILm576EEEESt10_Select1stISD_ENSA_6common11CephContext19associated_objs_cmpESaISD_EE22_M_emplace_hint_uniqueIJRKSt21piecewise_construct_tSt5tupleIJRSt17basic_string_viewIcS4_ERS7_EESP_IJRKSt15in_place_type_tIN6librbd21TaskFinisherSingletonEERPSH_EEEEESt17_Rb_tree_iteratorISD_ESt23_Rb_tree_const_iteratorISD_EDpOT_.constprop.0 (librbd.so.1 + 0x51126b)
                                                    #7  0x00007f065ba3d7a6 _ZN6librbd8ImageCtx4initEv (librbd.so.1 + 0x13e7a6)
                                                    #8  0x00007f065bb172d3 _ZN6librbd5image11OpenRequestINS_8ImageCtxEE12send_refreshEv (librbd.so.1 + 0x2182d3)
                                                    #9  0x00007f065bb17f46 _ZN6librbd5image11OpenRequestINS_8ImageCtxEE23handle_v2_get_data_poolEPi (librbd.so.1 + 0x218f46)
                                                    #10 0x00007f065bb182a7 _ZN6librbd4util6detail20rados_state_callbackINS_5image11OpenRequestINS_8ImageCtxEEEXadL_ZNS6_23handle_v2_get_data_poolEPiEELb1EEEvPvS8_ (librbd.so.1 + 0x2192a7)
                                                    #11 0x00007f065b8160ac _ZN5boost4asio6detail18completion_handlerINS1_7binder0IN8librados14CB_AioCompleteEEENS0_10io_context19basic_executor_typeISaIvELm0EEEE11do_completeEPvPNS1_19scheduler_operationERKNS_6system10error_codeEm (librados.so.2 + 0xad0ac)
                                                    #12 0x00007f065b815585 _ZN5boost4asio6detail14strand_service11do_completeEPvPNS1_19scheduler_operationERKNS_6system10error_codeEm (librados.so.2 + 0xac585)
                                                    #13 0x00007f065b890498 _ZN5boost4asio6detail9scheduler3runERNS_6system10error_codeE.constprop.0.isra.0 (librados.so.2 + 0x127498)
                                                    #14 0x00007f065b82f4e4 _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJZ17make_named_threadIZN4ceph5async15io_context_pool5startEsEUlvE_JEES_St17basic_string_viewIcSt11char_traitsIcEEOT_DpOT0_EUlSD_SG_E_S7_EEEEE6_M_runEv (librados.so.2 + 0xc64e4)
                                                    #15 0x00007f065a59dae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #16 0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #17 0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 757:
                                                    #0  0x00007f065dd2b38a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f065dd2d8e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f065a5976c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f065ba44eb3 _ZN6librbd10ImageStateINS_8ImageCtxEE4openEm (librbd.so.1 + 0x145eb3)
                                                    #4  0x00007f065ba14fcb rbd_open (librbd.so.1 + 0x115fcb)
                                                    #5  0x00007f065bfbf89d qemu_rbd_open (block-rbd.so + 0x489d)
                                                    #6  0x00005636f4b05e4c bdrv_open_driver.llvm.6332234179151191066 (qemu-img + 0x5ee4c)
                                                    #7  0x00005636f4b0ab6b bdrv_open_inherit.llvm.6332234179151191066 (qemu-img + 0x63b6b)
                                                    #8  0x00005636f4b175ce bdrv_open_child_bs.llvm.6332234179151191066 (qemu-img + 0x705ce)
                                                    #9  0x00005636f4b0a396 bdrv_open_inherit.llvm.6332234179151191066 (qemu-img + 0x63396)
                                                    #10 0x00005636f4b381f5 blk_new_open (qemu-img + 0x911f5)
                                                    #11 0x00005636f4bf3e16 img_open_file (qemu-img + 0x14ce16)
                                                    #12 0x00005636f4bf39e0 img_open (qemu-img + 0x14c9e0)
                                                    #13 0x00005636f4befc1d img_info (qemu-img + 0x148c1d)
                                                    #14 0x00005636f4be9638 main (qemu-img + 0x142638)
                                                    #15 0x00007f065dccd610 __libc_start_call_main (libc.so.6 + 0x2a610)
                                                    #16 0x00007f065dccd6c0 __libc_start_main@@GLIBC_2.34 (libc.so.6 + 0x2a6c0)
                                                    #17 0x00005636f4af5215 _start (qemu-img + 0x4e215)
                                                    
                                                    Stack trace of thread 758:
                                                    #0  0x00007f065ddab96d syscall (libc.so.6 + 0x10896d)
                                                    #1  0x00005636f4c74f73 qemu_event_wait (qemu-img + 0x1cdf73)
                                                    #2  0x00005636f4c81f87 call_rcu_thread (qemu-img + 0x1daf87)
                                                    #3  0x00005636f4c752ba qemu_thread_start.llvm.7701297430486814853 (qemu-img + 0x1ce2ba)
                                                    #4  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #5  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 761:
                                                    #0  0x00007f065ddb2b7e epoll_wait (libc.so.6 + 0x10fb7e)
                                                    #1  0x00007f065b0ee618 _ZN11EpollDriver10event_waitERSt6vectorI14FiredFileEventSaIS1_EEP7timeval (libceph-common.so.2 + 0x440618)
                                                    #2  0x00007f065b0ec702 _ZN11EventCenter14process_eventsEjPNSt6chrono8durationImSt5ratioILl1ELl1000000000EEEE (libceph-common.so.2 + 0x43e702)
                                                    #3  0x00007f065b0ed2c6 _ZNSt17_Function_handlerIFvvEZN12NetworkStack10add_threadEP6WorkerEUlvE_E9_M_invokeERKSt9_Any_data (libceph-common.so.2 + 0x43f2c6)
                                                    #4  0x00007f065a59dae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #5  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 759:
                                                    #0  0x00007f065dd2b38a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f065dd2d8e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f065a5976c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f065b1190a2 _ZN4ceph7logging3Log5entryEv (libceph-common.so.2 + 0x46b0a2)
                                                    #4  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #5  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 769:
                                                    #0  0x00007f065dd2b38a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f065dd2dcc0 pthread_cond_clockwait@GLIBC_2.30 (libc.so.6 + 0x8acc0)
                                                    #2  0x00007f065b868364 _ZN4ceph5timerINS_17coarse_mono_clockEE12timer_threadEv (librados.so.2 + 0xff364)
                                                    #3  0x00007f065a59dae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #4  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #5  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 771:
                                                    #0  0x00007f065dd2b38a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f065dd2d8e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f065a5976c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f065b0150b9 _ZN13DispatchQueue18run_local_deliveryEv (libceph-common.so.2 + 0x3670b9)
                                                    #4  0x00007f065b0a6431 _ZN13DispatchQueue19LocalDeliveryThread5entryEv (libceph-common.so.2 + 0x3f8431)
                                                    #5  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 766:
                                                    #0  0x00007f065dd2b38a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f065dd2dcc0 pthread_cond_clockwait@GLIBC_2.30 (libc.so.6 + 0x8acc0)
                                                    #2  0x00007f065af27150 _ZN4ceph6common24CephContextServiceThread5entryEv (libceph-common.so.2 + 0x279150)
                                                    #3  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #4  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 773:
                                                    #0  0x00007f065dd2b38a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f065dd2d8e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f065a5976c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f065af0c7f8 _ZN15CommonSafeTimerISt5mutexE12timer_threadEv (libceph-common.so.2 + 0x25e7f8)
                                                    #4  0x00007f065af0cf81 _ZN21CommonSafeTimerThreadISt5mutexE5entryEv (libceph-common.so.2 + 0x25ef81)
                                                    #5  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 775:
                                                    #0  0x00007f065dd2b38a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f065dd2d8e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f065a5976c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f065af0c7f8 _ZN15CommonSafeTimerISt5mutexE12timer_threadEv (libceph-common.so.2 + 0x25e7f8)
                                                    #4  0x00007f065af0cf81 _ZN21CommonSafeTimerThreadISt5mutexE5entryEv (libceph-common.so.2 + 0x25ef81)
                                                    #5  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 762:
                                                    #0  0x00007f065ddb2b7e epoll_wait (libc.so.6 + 0x10fb7e)
                                                    #1  0x00007f065b0ee618 _ZN11EpollDriver10event_waitERSt6vectorI14FiredFileEventSaIS1_EEP7timeval (libceph-common.so.2 + 0x440618)
                                                    #2  0x00007f065b0ec702 _ZN11EventCenter14process_eventsEjPNSt6chrono8durationImSt5ratioILl1ELl1000000000EEEE (libceph-common.so.2 + 0x43e702)
                                                    #3  0x00007f065b0ed2c6 _ZNSt17_Function_handlerIFvvEZN12NetworkStack10add_threadEP6WorkerEUlvE_E9_M_invokeERKSt9_Any_data (libceph-common.so.2 + 0x43f2c6)
                                                    #4  0x00007f065a59dae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #5  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 768:
                                                    #0  0x00007f065dd2b38a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f065dd2d8e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f065b890266 _ZN5boost4asio6detail9scheduler3runERNS_6system10error_codeE.constprop.0.isra.0 (librados.so.2 + 0x127266)
                                                    #3  0x00007f065b82f4e4 _ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJZ17make_named_threadIZN4ceph5async15io_context_pool5startEsEUlvE_JEES_St17basic_string_viewIcSt11char_traitsIcEEOT_DpOT0_EUlSD_SG_E_S7_EEEEE6_M_runEv (librados.so.2 + 0xc64e4)
                                                    #4  0x00007f065a59dae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #5  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 774:
                                                    #0  0x00007f065dd2b38a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f065dd2d8e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f065a5976c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f065af0c7f8 _ZN15CommonSafeTimerISt5mutexE12timer_threadEv (libceph-common.so.2 + 0x25e7f8)
                                                    #4  0x00007f065af0cf81 _ZN21CommonSafeTimerThreadISt5mutexE5entryEv (libceph-common.so.2 + 0x25ef81)
                                                    #5  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 760:
                                                    #0  0x00007f065ddb2b7e epoll_wait (libc.so.6 + 0x10fb7e)
                                                    #1  0x00007f065b0ee618 _ZN11EpollDriver10event_waitERSt6vectorI14FiredFileEventSaIS1_EEP7timeval (libceph-common.so.2 + 0x440618)
                                                    #2  0x00007f065b0ec702 _ZN11EventCenter14process_eventsEjPNSt6chrono8durationImSt5ratioILl1ELl1000000000EEEE (libceph-common.so.2 + 0x43e702)
                                                    #3  0x00007f065b0ed2c6 _ZNSt17_Function_handlerIFvvEZN12NetworkStack10add_threadEP6WorkerEUlvE_E9_M_invokeERKSt9_Any_data (libceph-common.so.2 + 0x43f2c6)
                                                    #4  0x00007f065a59dae4 execute_native_thread_routine (libstdc++.so.6 + 0xdbae4)
                                                    #5  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 770:
                                                    #0  0x00007f065dd2b38a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f065dd2d8e2 pthread_cond_wait@@GLIBC_2.3.2 (libc.so.6 + 0x8a8e2)
                                                    #2  0x00007f065a5976c0 _ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE (libstdc++.so.6 + 0xd56c0)
                                                    #3  0x00007f065b01549f _ZN13DispatchQueue5entryEv (libceph-common.so.2 + 0x36749f)
                                                    #4  0x00007f065b0a6411 _ZN13DispatchQueue14DispatchThread5entryEv (libceph-common.so.2 + 0x3f8411)
                                                    #5  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #6  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    
                                                    Stack trace of thread 772:
                                                    #0  0x00007f065dd2b38a __futex_abstimed_wait_common (libc.so.6 + 0x8838a)
                                                    #1  0x00007f065dd2dcc0 pthread_cond_clockwait@GLIBC_2.30 (libc.so.6 + 0x8acc0)
                                                    #2  0x00007f065af0cb23 _ZN15CommonSafeTimerISt5mutexE12timer_threadEv (libceph-common.so.2 + 0x25eb23)
                                                    #3  0x00007f065af0cf81 _ZN21CommonSafeTimerThreadISt5mutexE5entryEv (libceph-common.so.2 + 0x25ef81)
                                                    #4  0x00007f065dd2e2fa start_thread (libc.so.6 + 0x8b2fa)
                                                    #5  0x00007f065ddb3540 __clone3 (libc.so.6 + 0x110540)
                                                    ELF object binary architecture: AMD x86-64

● system.slice - System Slice
     Loaded: loaded
     Active: active since Sat 2025-10-11 02:42:14 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:14 UTC; 1h 28min ago
       Docs: man:systemd.special(7)
         IO: 1.8G read, 25.8G written
      Tasks: 1114
     Memory: 4.2G (peak: 5.1G)
        CPU: 11min 27.097s
     CGroup: /system.slice
             ├─NetworkManager.service
             │ └─44969 /usr/sbin/NetworkManager --no-daemon
             ├─auditd.service
             │ ├─700 /sbin/auditd
             │ └─702 /usr/sbin/sedispatch
             ├─chronyd.service
             │ └─54376 /usr/sbin/chronyd -F 2
             ├─crond.service
             │ ├─ 1007 /usr/sbin/crond -n
             │ └─27476 /usr/sbin/anacron -s
             ├─dbus-broker.service
             │ ├─738 /usr/bin/dbus-broker-launch --scope system --audit
             │ └─770 dbus-broker --log 4 --controller 9 --machine-id a1727ec20198bc6caf436a6e13c4ff5e --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             ├─edpm_iscsid.service
             │ └─230877 /usr/bin/conmon --api-version 1 -c f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d -u f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d/userdata -p /run/containers/storage/overlay-containers/f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d/userdata/pidfile -n iscsid --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d/userdata/oci-log --conmon-pidfile /run/iscsid.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only 
--exit-command-arg f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d
             ├─edpm_multipathd.service
             │ └─241112 /usr/bin/conmon --api-version 1 -c 8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8 -u 8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8/userdata -p /run/containers/storage/overlay-containers/8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8/userdata/pidfile -n multipathd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8/userdata/oci-log --conmon-pidfile /run/multipathd.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8
             ├─edpm_nova_compute.service
             │ └─260089 /usr/bin/conmon --api-version 1 -c 26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1 -u 26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1/userdata -p /run/containers/storage/overlay-containers/26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 26619da4fa972b2b2b8df272a799dceac616417728cb0ea1160a898d8a7167a1
             ├─edpm_ovn_controller.service
             │ └─152667 /usr/bin/conmon --api-version 1 -c 65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2 -u 65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2/userdata -p /run/containers/storage/overlay-containers/65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2
             ├─edpm_ovn_metadata_agent.service
             │ └─162240 /usr/bin/conmon --api-version 1 -c dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab -u dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab/userdata -p /run/containers/storage/overlay-containers/dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab
             ├─gssproxy.service
             │ └─874 /usr/sbin/gssproxy -D
             ├─irqbalance.service
             │ └─777 /usr/sbin/irqbalance
             ├─ovs-vswitchd.service
             │ └─43265 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             ├─ovsdb-server.service
             │ └─43180 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             ├─polkit.service
             │ └─6205 /usr/lib/polkit-1/polkitd --no-debug
             ├─rpcbind.service
             │ └─698 /usr/bin/rpcbind -w -f
             ├─rsyslog.service
             │ └─1004 /usr/sbin/rsyslogd -n
             ├─sshd.service
             │ └─189518 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             ├─system-ceph\x2d92cfe4d4\x2d4917\x2d5be1\x2d9d00\x2d73758793a62b.slice
             │ ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@crash.compute-0.service
             │ │ ├─libpod-payload-90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476
             │ │ │ ├─82392 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ └─82394 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ └─runtime
             │ │   └─82390 /usr/bin/conmon --api-version 1 -c 90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476 -u 90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476/userdata -p /run/containers/storage/overlay-containers/90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 90b5b5a031904a9fddf3ed90b02a240d3a0ebaaf670415137274ba4391b5f476
             │ ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mds.cephfs.compute-0.uxaxgb.service
             │ │ ├─libpod-payload-53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6
             │ │ │ ├─101046 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.uxaxgb -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─101067 /usr/bin/ceph-mds -n mds.cephfs.compute-0.uxaxgb -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─101044 /usr/bin/conmon --api-version 1 -c 53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6 -u 53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6/userdata -p /run/containers/storage/overlay-containers/53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-mds-cephfs-compute-0-uxaxgb --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mds.cephfs.compute-0.uxaxgb.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 53426c3eb317616c2b70d50730a6965f375d00d0ca023da2460bbd9c696b45a6
             │ ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mgr.compute-0.pdyrua.service
             │ │ ├─libpod-payload-5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba
             │ │ │ ├─74615 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.pdyrua -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─74617 /usr/bin/ceph-mgr -n mgr.compute-0.pdyrua -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─74613 /usr/bin/conmon --api-version 1 -c 5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba -u 5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba/userdata -p /run/containers/storage/overlay-containers/5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-mgr-compute-0-pdyrua --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mgr.compute-0.pdyrua.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5396d33f03d79fbf0e6626513c29ad41ed11bd6b7439ed7e048b771ff7bb44ba
             │ ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mon.compute-0.service
             │ │ ├─libpod-payload-a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05
             │ │ │ ├─74324 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─74326 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─runtime
             │ │   └─74322 /usr/bin/conmon --api-version 1 -c a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05 -u a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05/userdata -p /run/containers/storage/overlay-containers/a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg a848fe58749db588a5a4b8471e0c9916b9e4a1ccc899f04343e6491a43c45c05
             │ ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.0.service
             │ │ ├─libpod-payload-47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205
             │ │ │ ├─88323 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─88325 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─88321 /usr/bin/conmon --api-version 1 -c 47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205 -u 47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205/userdata -p /run/containers/storage/overlay-containers/47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 47f64e87e5871d7c071a121b83455d21f2bfdfce6b6c64a3ceca78155daa9205
             │ ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.1.service
             │ │ ├─libpod-payload-159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2
             │ │ │ ├─89399 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─89401 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─89397 /usr/bin/conmon --api-version 1 -c 159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2 -u 159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2/userdata -p /run/containers/storage/overlay-containers/159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 159562a3a15003ca855365b6b045a9e9a594d967d5f41c2a02d35e99d6a005e2
             │ ├─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.2.service
             │ │ ├─libpod-payload-1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784
             │ │ │ ├─90441 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─90443 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─90439 /usr/bin/conmon --api-version 1 -c 1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784 -u 1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784/userdata -p /run/containers/storage/overlay-containers/1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 1ea030e74696bd87ca10210c9afc9c6b7cb96088b5f077a47c4d25c6a9cd0784
             │ └─ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@rgw.rgw.compute-0.bqunnq.service
             │   ├─libpod-payload-25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60
             │   │ ├─100587 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.bqunnq -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │   │ └─100589 /usr/bin/radosgw -n client.rgw.rgw.compute-0.bqunnq -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │   └─runtime
             │     └─100585 /usr/bin/conmon --api-version 1 -c 25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60 -u 25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60/userdata -p /run/containers/storage/overlay-containers/25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60/userdata/pidfile -n ceph-92cfe4d4-4917-5be1-9d00-73758793a62b-rgw-rgw-compute-0-bqunnq --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60/userdata/oci-log --conmon-pidfile /run/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b@rgw.rgw.compute-0.bqunnq.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 25ff1f079ca6d5d68281bf95dc2311e742798de4e5895710535ed7a1e9682f60
             ├─system-getty.slice
             │ └─getty@tty1.service
             │   └─1008 /sbin/agetty -o "-p -- \\u" --noclear - linux
             ├─system-serial\x2dgetty.slice
             │ └─serial-getty@ttyS0.service
             │   └─1009 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             ├─systemd-hostnamed.service
             │ └─313130 /usr/lib/systemd/systemd-hostnamed
             ├─systemd-journald.service
             │ └─674 /usr/lib/systemd/systemd-journald
             ├─systemd-logind.service
             │ └─782 /usr/lib/systemd/systemd-logind
             ├─systemd-machined.service
             │ └─215071 /usr/lib/systemd/systemd-machined
             ├─systemd-udevd.service
             │ └─udev
             │   └─729 /usr/lib/systemd/systemd-udevd
             ├─tuned.service
             │ └─112210 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             ├─virtlogd.service
             │ └─214444 /usr/sbin/virtlogd
             ├─virtnodedevd.service
             │ └─260415 /usr/sbin/virtnodedevd --timeout 120
             ├─virtqemud.service
             │ └─259861 /usr/sbin/virtqemud --timeout 120
             └─virtsecretd.service
               └─267673 /usr/sbin/virtsecretd --timeout 120

Oct 11 04:10:25 compute-0 podman[314596]: 2025-10-11 04:10:25.001991533 +0000 UTC m=+0.064375184 container health_status 8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8 (image=quay.io/podified-antelope-centos9/openstack-multipathd:current-podified, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi:z', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, config_id=multipathd, container_name=multipathd, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true)
Oct 11 04:10:25 compute-0 nova_compute[260089]: 2025-10-11 04:10:25.692 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Oct 11 04:10:26 compute-0 nova_compute[260089]: 2025-10-11 04:10:26.379 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Oct 11 04:10:30 compute-0 nova_compute[260089]: 2025-10-11 04:10:30.693 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Oct 11 04:10:31 compute-0 nova_compute[260089]: 2025-10-11 04:10:31.381 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Oct 11 04:10:33 compute-0 podman[316049]: 2025-10-11 04:10:33.020474025 +0000 UTC m=+0.080446587 container health_status 65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, tcib_managed=true, io.buildah.version=1.41.3, managed_by=edpm_ansible, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, container_name=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Oct 11 04:10:35 compute-0 nova_compute[260089]: 2025-10-11 04:10:35.695 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Oct 11 04:10:36 compute-0 nova_compute[260089]: 2025-10-11 04:10:36.384 2 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Oct 11 04:10:37 compute-0 virtqemud[259861]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory
Oct 11 04:10:38 compute-0 podman[316458]: 2025-10-11 04:10:38.127409692 +0000 UTC m=+0.060110525 container health_status dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_metadata_agent, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent, managed_by=edpm_ansible, io.buildah.version=1.41.3, maintainer=OpenStack 
Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, org.label-schema.build-date=20251009, org.label-schema.license=GPLv2, tcib_managed=true)

● user-1000.slice - User Slice of UID 1000
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Sat 2025-10-11 02:42:35 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:35 UTC; 1h 28min ago
       Docs: man:user@.service(5)
         IO: 711.9M read, 7.8G written
      Tasks: 18 (limit: 20036)
     Memory: 1.4G (peak: 4.1G)
        CPU: 23min 19.667s
     CGroup: /user.slice/user-1000.slice
             ├─session-1.scope
             │ └─1267 /usr/bin/python3
             ├─session-53.scope
             │ ├─308926 "sshd-session: zuul [priv]"
             │ ├─308929 "sshd-session: zuul@notty"
             │ ├─308930 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt"
             │ ├─308954 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ ├─316407 timeout 300s systemctl status --all
             │ ├─316408 systemctl status --all
             │ ├─316550 timeout --foreground 300s virsh -r nodedev-dumpxml scsi_host0
             │ └─316551 virsh -r nodedev-dumpxml scsi_host0
             └─user@1000.service
               ├─app.slice
               │ └─dbus-broker.service
               │   ├─9030 /usr/bin/dbus-broker-launch --scope user
               │   └─9042 dbus-broker --log 4 --controller 9 --machine-id a1727ec20198bc6caf436a6e13c4ff5e --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               ├─init.scope
               │ ├─1057 /usr/lib/systemd/systemd --user
               │ └─1059 "(sd-pam)"
               └─user.slice
                 └─podman-pause-31202ca3.scope
                   └─8928 catatonit -P

Oct 11 03:39:21 compute-0 python3.9[260252]: ansible-containers.podman.podman_container Invoked with name=nova_compute_init state=started executable=podman detach=True debug=False force_restart=False force_delete=True generate_systemd={} image_strict=False recreate=False image=None annotation=None arch=None attach=None authfile=None blkio_weight=None blkio_weight_device=None cap_add=None cap_drop=None cgroup_conf=None cgroup_parent=None cgroupns=None cgroups=None chrootdirs=None cidfile=None cmd_args=None conmon_pidfile=None command=None cpu_period=None cpu_quota=None cpu_rt_period=None cpu_rt_runtime=None cpu_shares=None cpus=None cpuset_cpus=None cpuset_mems=None decryption_key=None delete_depend=None delete_time=None delete_volumes=None detach_keys=None device=None device_cgroup_rule=None device_read_bps=None device_read_iops=None device_write_bps=None device_write_iops=None dns=None dns_option=None dns_search=None entrypoint=None env=None env_file=None env_host=None env_merge=None etc_hosts=None expose=None gidmap=None gpus=None group_add=None group_entry=None healthcheck=None healthcheck_interval=None healthcheck_retries=None healthcheck_start_period=None health_startup_cmd=None health_startup_interval=None health_startup_retries=None health_startup_success=None health_startup_timeout=None healthcheck_timeout=None healthcheck_failure_action=None hooks_dir=None hostname=None hostuser=None http_proxy=None image_volume=None init=None init_ctr=None init_path=None interactive=None ip=None ip6=None ipc=None kernel_memory=None label=None label_file=None log_driver=None log_level=None log_opt=None mac_address=None memory=None memory_reservation=None memory_swap=None memory_swappiness=None mount=None network=None network_aliases=None no_healthcheck=None no_hosts=None oom_kill_disable=None oom_score_adj=None os=None passwd=None passwd_entry=None personality=None pid=None pid_file=None pids_limit=None platform=None pod=None pod_id_file=None preserve_fd=None 
preserve_fds=None privileged=None publish=None publish_all=None pull=None quadlet_dir=None quadlet_filename=None quadlet_file_mode=None quadlet_options=None rdt_class=None read_only=None read_only_tmpfs=None requires=None restart_policy=None restart_time=None retry=None retry_delay=None rm=None rmi=None rootfs=None seccomp_policy=None secrets=NOT_LOGGING_PARAMETER sdnotify=None security_opt=None shm_size=None shm_size_systemd=None sig_proxy=None stop_signal=None stop_timeout=None stop_time=None subgidname=None subuidname=None sysctl=None systemd=None timeout=None timezone=None tls_verify=None tmpfs=None tty=None uidmap=None ulimit=None umask=None unsetenv=None unsetenv_all=None user=None userns=None uts=None variant=None volume=None volumes_from=None workdir=None
Oct 11 03:39:22 compute-0 podman[260277]: 2025-10-11 03:39:22.07577285 +0000 UTC m=+0.140414305 container init 496cf2c6a410baa100fe0ea9fd6c8bb42fe073ff3b6903246928d7a1d9d82d47 (image=quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified, name=nova_compute_init, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f, config_id=edpm, container_name=nova_compute_init, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified', 'privileged': False, 'user': 'root', 'restart': 'never', 'command': 'bash -c $* -- eval python3 /sbin/nova_statedir_ownership.py | logger -t nova_compute_init', 'net': 'none', 'security_opt': ['label=disable'], 'detach': False, 'environment': {'NOVA_STATEDIR_OWNERSHIP_SKIP': '/var/lib/nova/compute_id', '__OS_DEBUG': False}, 'volumes': ['/dev/log:/dev/log', '/var/lib/nova:/var/lib/nova:shared', '/var/lib/_nova_secontext:/var/lib/_nova_secontext:shared,z', '/var/lib/openstack/config/nova/nova_statedir_ownership.py:/sbin/nova_statedir_ownership.py:z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.name=CentOS Stream 9 Base Image)
Oct 11 03:39:22 compute-0 podman[260277]: 2025-10-11 03:39:22.084977666 +0000 UTC m=+0.149619111 container start 496cf2c6a410baa100fe0ea9fd6c8bb42fe073ff3b6903246928d7a1d9d82d47 (image=quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified, name=nova_compute_init, config_id=edpm, container_name=nova_compute_init, managed_by=edpm_ansible, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified', 'privileged': False, 'user': 'root', 'restart': 'never', 'command': 'bash -c $* -- eval python3 /sbin/nova_statedir_ownership.py | logger -t nova_compute_init', 'net': 'none', 'security_opt': ['label=disable'], 'detach': False, 'environment': {'NOVA_STATEDIR_OWNERSHIP_SKIP': '/var/lib/nova/compute_id', '__OS_DEBUG': False}, 'volumes': ['/dev/log:/dev/log', '/var/lib/nova:/var/lib/nova:shared', '/var/lib/_nova_secontext:/var/lib/_nova_secontext:shared,z', '/var/lib/openstack/config/nova/nova_statedir_ownership.py:/sbin/nova_statedir_ownership.py:z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251009, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_build_tag=c4b77291aeca5591ac860bd4127cec2f)
Oct 11 03:39:22 compute-0 python3.9[260252]: ansible-containers.podman.podman_container PODMAN-CONTAINER-DEBUG: podman start nova_compute_init
Oct 11 03:39:22 compute-0 sudo[260250]: pam_unix(sudo:session): session closed for user root
Oct 11 03:39:22 compute-0 sshd-session[221991]: Connection closed by 192.168.122.30 port 43918
Oct 11 03:39:22 compute-0 sshd-session[221988]: pam_unix(sshd:session): session closed for user zuul
Oct 11 04:09:47 compute-0 sudo[308930]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt'
Oct 11 04:09:47 compute-0 sudo[308930]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Oct 11 04:09:57 compute-0 ovs-vsctl[309342]: ovs|00001|db_ctl_base|ERR|no key "dpdk-init" in Open_vSwitch record "." column other_config

● user-42477.slice - User Slice of UID 42477
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Sat 2025-10-11 03:19:33 UTC; 51min ago
      Until: Sat 2025-10-11 03:19:33 UTC; 51min ago
       Docs: man:user@.service(5)
         IO: 2.9M read, 170.4M written
      Tasks: 26 (limit: 20036)
     Memory: 28.2M (peak: 90.5M)
        CPU: 4min 39.784s
     CGroup: /user.slice/user-42477.slice
             ├─session-21.scope
             │ ├─75886 "sshd-session: ceph-admin [priv]"
             │ └─75908 "sshd-session: ceph-admin"
             ├─session-23.scope
             │ ├─75893 "sshd-session: ceph-admin [priv]"
             │ └─75909 "sshd-session: ceph-admin@notty"
             ├─session-24.scope
             │ ├─75960 "sshd-session: ceph-admin [priv]"
             │ └─75963 "sshd-session: ceph-admin@notty"
             ├─session-25.scope
             │ ├─76014 "sshd-session: ceph-admin [priv]"
             │ └─76017 "sshd-session: ceph-admin@notty"
             ├─session-26.scope
             │ ├─76068 "sshd-session: ceph-admin [priv]"
             │ └─76071 "sshd-session: ceph-admin@notty"
             ├─session-27.scope
             │ ├─76122 "sshd-session: ceph-admin [priv]"
             │ └─76125 "sshd-session: ceph-admin@notty"
             ├─session-28.scope
             │ ├─76176 "sshd-session: ceph-admin [priv]"
             │ └─76179 "sshd-session: ceph-admin@notty"
             ├─session-29.scope
             │ ├─76230 "sshd-session: ceph-admin [priv]"
             │ └─76233 "sshd-session: ceph-admin@notty"
             ├─session-30.scope
             │ ├─76284 "sshd-session: ceph-admin [priv]"
             │ └─76287 "sshd-session: ceph-admin@notty"
             ├─session-31.scope
             │ ├─76338 "sshd-session: ceph-admin [priv]"
             │ └─76341 "sshd-session: ceph-admin@notty"
             ├─session-32.scope
             │ ├─76365 "sshd-session: ceph-admin [priv]"
             │ └─76368 "sshd-session: ceph-admin@notty"
             ├─session-33.scope
             │ ├─76419 "sshd-session: ceph-admin [priv]"
             │ └─76422 "sshd-session: ceph-admin@notty"
             └─user@42477.service
               └─init.scope
                 ├─75890 /usr/lib/systemd/systemd --user
                 └─75892 "(sd-pam)"

Oct 11 04:10:14 compute-0 podman[312731]: 2025-10-11 04:10:14.216724725 +0000 UTC m=+0.255550979 container attach 62934323c7dacd31d940284e1901d7f46aeff7a5d1c2a6cf475415517bf1f78a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_cannon, org.label-schema.vendor=CentOS, io.buildah.version=1.39.3, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad)
Oct 11 04:10:15 compute-0 podman[312731]: 2025-10-11 04:10:15.227679315 +0000 UTC m=+1.266505519 container died 62934323c7dacd31d940284e1901d7f46aeff7a5d1c2a6cf475415517bf1f78a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_cannon, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.build-date=20250507, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default)
Oct 11 04:10:15 compute-0 podman[312731]: 2025-10-11 04:10:15.406281947 +0000 UTC m=+1.445108131 container remove 62934323c7dacd31d940284e1901d7f46aeff7a5d1c2a6cf475415517bf1f78a (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=loving_cannon, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=reef, ceph=True, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Oct 11 04:10:15 compute-0 sudo[312558]: pam_unix(sudo:session): session closed for user root
Oct 11 04:10:15 compute-0 sudo[312933]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Oct 11 04:10:15 compute-0 sudo[312933]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 04:10:15 compute-0 sudo[312933]: pam_unix(sudo:session): session closed for user root
Oct 11 04:10:15 compute-0 sudo[312966]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Oct 11 04:10:15 compute-0 sudo[312966]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Oct 11 04:10:15 compute-0 sudo[312966]: pam_unix(sudo:session): session closed for user root

● user.slice - User and Session Slice
     Loaded: loaded (/usr/lib/systemd/system/user.slice; static)
     Active: active since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:systemd.special(7)
         IO: 715.8M read, 8.0G written
      Tasks: 43
     Memory: 1.4G (peak: 4.1G)
        CPU: 28min 478ms
     CGroup: /user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─1267 /usr/bin/python3
             │ ├─session-53.scope
             │ │ ├─308926 "sshd-session: zuul [priv]"
             │ │ ├─308929 "sshd-session: zuul@notty"
             │ │ ├─308930 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt"
             │ │ ├─308954 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─316407 timeout 300s systemctl status --all
             │ │ └─316408 systemctl status --all
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─9030 /usr/bin/dbus-broker-launch --scope user
             │   │   └─9042 dbus-broker --log 4 --controller 9 --machine-id a1727ec20198bc6caf436a6e13c4ff5e --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─1057 /usr/lib/systemd/systemd --user
             │   │ └─1059 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-31202ca3.scope
             │       └─8928 catatonit -P
             └─user-42477.slice
               ├─session-21.scope
               │ ├─75886 "sshd-session: ceph-admin [priv]"
               │ └─75908 "sshd-session: ceph-admin"
               ├─session-23.scope
               │ ├─75893 "sshd-session: ceph-admin [priv]"
               │ └─75909 "sshd-session: ceph-admin@notty"
               ├─session-24.scope
               │ ├─75960 "sshd-session: ceph-admin [priv]"
               │ └─75963 "sshd-session: ceph-admin@notty"
               ├─session-25.scope
               │ ├─76014 "sshd-session: ceph-admin [priv]"
               │ └─76017 "sshd-session: ceph-admin@notty"
               ├─session-26.scope
               │ ├─76068 "sshd-session: ceph-admin [priv]"
               │ └─76071 "sshd-session: ceph-admin@notty"
               ├─session-27.scope
               │ ├─76122 "sshd-session: ceph-admin [priv]"
               │ └─76125 "sshd-session: ceph-admin@notty"
               ├─session-28.scope
               │ ├─76176 "sshd-session: ceph-admin [priv]"
               │ └─76179 "sshd-session: ceph-admin@notty"
               ├─session-29.scope
               │ ├─76230 "sshd-session: ceph-admin [priv]"
               │ └─76233 "sshd-session: ceph-admin@notty"
               ├─session-30.scope
               │ ├─76284 "sshd-session: ceph-admin [priv]"
               │ └─76287 "sshd-session: ceph-admin@notty"
               ├─session-31.scope
               │ ├─76338 "sshd-session: ceph-admin [priv]"
               │ └─76341 "sshd-session: ceph-admin@notty"
               ├─session-32.scope
               │ ├─76365 "sshd-session: ceph-admin [priv]"
               │ └─76368 "sshd-session: ceph-admin@notty"
               ├─session-33.scope
               │ ├─76419 "sshd-session: ceph-admin [priv]"
               │ └─76422 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─75890 /usr/lib/systemd/systemd --user
                   └─75892 "(sd-pam)"

● dbus.socket - D-Bus System Message Bus Socket
     Loaded: loaded (/usr/lib/systemd/system/dbus.socket; enabled; preset: enabled)
     Active: active (running) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
   Triggers: ● dbus-broker.service
     Listen: /run/dbus/system_bus_socket (Stream)
     CGroup: /system.slice/dbus.socket

Oct 11 02:42:18 localhost systemd[1]: Listening on D-Bus System Message Bus Socket.

● dm-event.socket - Device-mapper event daemon FIFOs
     Loaded: loaded (/usr/lib/systemd/system/dm-event.socket; enabled; preset: enabled)
     Active: active (listening) since Sat 2025-10-11 03:08:56 UTC; 1h 1min ago
      Until: Sat 2025-10-11 03:08:56 UTC; 1h 1min ago
   Triggers: ● dm-event.service
       Docs: man:dmeventd(8)
     Listen: /run/dmeventd-server (FIFO)
             /run/dmeventd-client (FIFO)
     CGroup: /system.slice/dm-event.socket

Oct 11 03:08:56 compute-0 systemd[1]: Listening on Device-mapper event daemon FIFOs.

○ iscsid.socket - Open-iSCSI iscsid Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsid.socket; disabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● iscsid.service
       Docs: man:iscsid(8)
             man:iscsiadm(8)
     Listen: @ISCSIADM_ABSTRACT_NAMESPACE (Stream)

○ iscsiuio.socket - Open-iSCSI iscsiuio Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● iscsiuio.service
       Docs: man:iscsiuio(8)
     Listen: @ISCSID_UIP_ABSTRACT_NAMESPACE (Stream)

○ libvirtd-admin.socket - libvirt legacy monolithic daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-admin.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)

○ libvirtd-ro.socket - libvirt legacy monolithic daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-ro.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)

○ libvirtd-tls.socket
     Loaded: masked (Reason: Unit libvirtd-tls.socket is masked.)
     Active: inactive (dead)

○ libvirtd.socket - libvirt legacy monolithic daemon socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock (Stream)

● lvm2-lvmpolld.socket - LVM2 poll daemon socket
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.socket; enabled; preset: enabled)
     Active: active (listening) since Sat 2025-10-11 03:08:58 UTC; 1h 1min ago
      Until: Sat 2025-10-11 03:08:58 UTC; 1h 1min ago
   Triggers: ● lvm2-lvmpolld.service
       Docs: man:lvmpolld(8)
     Listen: /run/lvm/lvmpolld.socket (Stream)
     CGroup: /system.slice/lvm2-lvmpolld.socket

Oct 11 03:08:58 compute-0 systemd[1]: Listening on LVM2 poll daemon socket.

● rpcbind.socket - RPCbind Server Activation Socket
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.socket; enabled; preset: enabled)
     Active: active (running) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
   Triggers: ● rpcbind.service
     Listen: /run/rpcbind.sock (Stream)
             0.0.0.0:111 (Stream)
             0.0.0.0:111 (Datagram)
             [::]:111 (Stream)
             [::]:111 (Datagram)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 16.0K (peak: 284.0K)
        CPU: 4ms
     CGroup: /system.slice/rpcbind.socket

● sssd-kcm.socket - SSSD Kerberos Cache Manager responder socket
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.socket; enabled; preset: enabled)
     Active: active (listening) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
   Triggers: ● sssd-kcm.service
       Docs: man:sssd-kcm(8)
     Listen: /run/.heim_org.h5l.kcm-socket (Stream)
     CGroup: /system.slice/sssd-kcm.socket

Oct 11 02:42:18 localhost systemd[1]: Listening on SSSD Kerberos Cache Manager responder socket.

○ syslog.socket - Syslog Socket
     Loaded: loaded (/usr/lib/systemd/system/syslog.socket; static)
     Active: inactive (dead)
   Triggers: ● syslog.service
       Docs: man:systemd.special(7)
             https://www.freedesktop.org/wiki/Software/systemd/syslog
     Listen: /run/systemd/journal/syslog (Datagram)

● systemd-coredump.socket - Process Core Dump Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-coredump.socket; static)
     Active: active (listening) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:systemd-coredump(8)
     Listen: /run/systemd/coredump (SequentialPacket)
   Accepted: 1; Connected: 0;
     CGroup: /system.slice/systemd-coredump.socket

● systemd-initctl.socket - initctl Compatibility Named Pipe
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.socket; static)
     Active: active (listening) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
   Triggers: ● systemd-initctl.service
       Docs: man:systemd-initctl.socket(8)
     Listen: /run/initctl (FIFO)
     CGroup: /system.slice/systemd-initctl.socket

● systemd-journald-dev-log.socket - Journal Socket (/dev/log)
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald-dev-log.socket; static)
     Active: active (running) since Sat 2025-10-11 02:42:14 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:14 UTC; 1h 28min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/dev-log (Datagram)
     CGroup: /system.slice/systemd-journald-dev-log.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-journald.socket - Journal Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.socket; static)
     Active: active (running) since Sat 2025-10-11 02:42:14 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:14 UTC; 1h 28min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/socket (Datagram)
             /run/systemd/journal/stdout (Stream)
     CGroup: /system.slice/systemd-journald.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-rfkill.socket - Load/Save RF Kill Switch Status /dev/rfkill Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.socket; static)
     Active: active (listening) since Sat 2025-10-11 02:42:19 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:19 UTC; 1h 28min ago
   Triggers: ● systemd-rfkill.service
       Docs: man:systemd-rfkill.socket(8)
     Listen: /dev/rfkill (Special)
     CGroup: /system.slice/systemd-rfkill.socket

Oct 11 02:42:19 np0005480824.novalocal systemd[1]: Listening on Load/Save RF Kill Switch Status /dev/rfkill Watch.

● systemd-udevd-control.socket - udev Control Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-control.socket; static)
     Active: active (running) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-control.socket(8)
             man:udev(7)
     Listen: /run/udev/control (SequentialPacket)
     CGroup: /system.slice/systemd-udevd-control.socket

● systemd-udevd-kernel.socket - udev Kernel Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-kernel.socket; static)
     Active: active (running) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-kernel.socket(8)
             man:udev(7)
     Listen: kobject-uevent 1 (Netlink)
     CGroup: /system.slice/systemd-udevd-kernel.socket

○ virtinterfaced-admin.socket - libvirt interface daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-admin-sock (Stream)

○ virtinterfaced-ro.socket - libvirt interface daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock-ro (Stream)

○ virtinterfaced.socket - libvirt interface daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock (Stream)

○ virtlockd-admin.socket - libvirt locking daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-admin-sock (Stream)

● virtlockd.socket - libvirt locking daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.socket; enabled; preset: enabled)
     Active: active (listening) since Sat 2025-10-11 03:34:58 UTC; 35min ago
      Until: Sat 2025-10-11 03:34:58 UTC; 35min ago
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-sock (Stream)
     CGroup: /system.slice/virtlockd.socket

Oct 11 03:34:58 compute-0 systemd[1]: Listening on libvirt locking daemon socket.

● virtlogd-admin.socket - libvirt logging daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd-admin.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-10-11 03:34:54 UTC; 35min ago
      Until: Sat 2025-10-11 03:34:54 UTC; 35min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 0B (peak: 512.0K)
        CPU: 5ms
     CGroup: /system.slice/virtlogd-admin.socket

Oct 11 03:34:54 compute-0 systemd[1]: Starting libvirt logging daemon admin socket...
Oct 11 03:34:54 compute-0 systemd[1]: Listening on libvirt logging daemon admin socket.

● virtlogd.socket - libvirt logging daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-10-11 03:34:54 UTC; 35min ago
      Until: Sat 2025-10-11 03:34:54 UTC; 35min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 4.0K (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtlogd.socket

Oct 11 03:34:54 compute-0 systemd[1]: Starting libvirt logging daemon socket...
Oct 11 03:34:54 compute-0 systemd[1]: Listening on libvirt logging daemon socket.

○ virtnetworkd-admin.socket - libvirt network daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-admin-sock (Stream)

○ virtnetworkd-ro.socket - libvirt network daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock-ro (Stream)

○ virtnetworkd.socket - libvirt network daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock (Stream)

● virtnodedevd-admin.socket - libvirt nodedev daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-admin.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-10-11 03:34:55 UTC; 35min ago
      Until: Sat 2025-10-11 03:34:55 UTC; 35min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 0B (peak: 512.0K)
        CPU: 5ms
     CGroup: /system.slice/virtnodedevd-admin.socket

Oct 11 03:34:55 compute-0 systemd[1]: Starting libvirt nodedev daemon admin socket...
Oct 11 03:34:55 compute-0 systemd[1]: Listening on libvirt nodedev daemon admin socket.

● virtnodedevd-ro.socket - libvirt nodedev daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-ro.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-10-11 03:34:55 UTC; 35min ago
      Until: Sat 2025-10-11 03:34:55 UTC; 35min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 4.0K (peak: 512.0K)
        CPU: 5ms
     CGroup: /system.slice/virtnodedevd-ro.socket

Oct 11 03:34:55 compute-0 systemd[1]: Starting libvirt nodedev daemon read-only socket...
Oct 11 03:34:55 compute-0 systemd[1]: Listening on libvirt nodedev daemon read-only socket.

● virtnodedevd.socket - libvirt nodedev daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-10-11 03:34:55 UTC; 35min ago
      Until: Sat 2025-10-11 03:34:55 UTC; 35min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtnodedevd.socket

Oct 11 03:34:55 compute-0 systemd[1]: Starting libvirt nodedev daemon socket...
Oct 11 03:34:55 compute-0 systemd[1]: Listening on libvirt nodedev daemon socket.

○ virtnwfilterd-admin.socket - libvirt nwfilter daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-admin-sock (Stream)

○ virtnwfilterd-ro.socket - libvirt nwfilter daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock-ro (Stream)

○ virtnwfilterd.socket - libvirt nwfilter daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock (Stream)

● virtproxyd-admin.socket - libvirt proxy daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-admin.socket.d
             └─override.conf
     Active: active (listening) since Sat 2025-10-11 03:34:57 UTC; 35min ago
      Until: Sat 2025-10-11 03:34:57 UTC; 35min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 0B (peak: 564.0K)
        CPU: 7ms
     CGroup: /system.slice/virtproxyd-admin.socket

Oct 11 03:34:57 compute-0 systemd[1]: Starting libvirt proxy daemon admin socket...
Oct 11 03:34:57 compute-0 systemd[1]: Listening on libvirt proxy daemon admin socket.

● virtproxyd-ro.socket - libvirt proxy daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-ro.socket.d
             └─override.conf
     Active: active (listening) since Sat 2025-10-11 03:34:57 UTC; 35min ago
      Until: Sat 2025-10-11 03:34:57 UTC; 35min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 0B (peak: 672.0K)
        CPU: 3ms
     CGroup: /system.slice/virtproxyd-ro.socket

Oct 11 03:34:57 compute-0 systemd[1]: Starting libvirt proxy daemon read-only socket...
Oct 11 03:34:57 compute-0 systemd[1]: Listening on libvirt proxy daemon read-only socket.

● virtproxyd-tls.socket - libvirt proxy daemon TLS IP socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-tls.socket; enabled; preset: disabled)
     Active: active (listening) since Sat 2025-10-11 03:33:48 UTC; 36min ago
      Until: Sat 2025-10-11 03:33:48 UTC; 36min ago
   Triggers: ● virtproxyd.service
     Listen: [::]:16514 (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 8.0K (peak: 256.0K)
        CPU: 1ms
     CGroup: /system.slice/virtproxyd-tls.socket

Oct 11 03:33:48 compute-0 systemd[1]: Listening on libvirt proxy daemon TLS IP socket.

● virtproxyd.socket - libvirt proxy daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd.socket.d
             └─override.conf
     Active: active (listening) since Sat 2025-10-11 03:33:48 UTC; 36min ago
      Until: Sat 2025-10-11 03:33:48 UTC; 36min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock (Stream)
     CGroup: /system.slice/virtproxyd.socket

Oct 11 03:33:48 compute-0 systemd[1]: Listening on libvirt proxy daemon socket.

● virtqemud-admin.socket - libvirt QEMU daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-admin.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-10-11 03:34:58 UTC; 35min ago
      Until: Sat 2025-10-11 03:34:58 UTC; 35min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 0B (peak: 548.0K)
        CPU: 4ms
     CGroup: /system.slice/virtqemud-admin.socket

Oct 11 03:34:58 compute-0 systemd[1]: Starting libvirt QEMU daemon admin socket...
Oct 11 03:34:58 compute-0 systemd[1]: Listening on libvirt QEMU daemon admin socket.

● virtqemud-ro.socket - libvirt QEMU daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-ro.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-10-11 03:34:58 UTC; 35min ago
      Until: Sat 2025-10-11 03:34:58 UTC; 35min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 0B (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtqemud-ro.socket

Oct 11 03:34:58 compute-0 systemd[1]: Starting libvirt QEMU daemon read-only socket...
Oct 11 03:34:58 compute-0 systemd[1]: Listening on libvirt QEMU daemon read-only socket.

● virtqemud.socket - libvirt QEMU daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-10-11 03:34:58 UTC; 35min ago
      Until: Sat 2025-10-11 03:34:58 UTC; 35min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtqemud.socket

Oct 11 03:34:58 compute-0 systemd[1]: Starting libvirt QEMU daemon socket...
Oct 11 03:34:58 compute-0 systemd[1]: Listening on libvirt QEMU daemon socket.

● virtsecretd-admin.socket - libvirt secret daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-admin.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-10-11 03:34:59 UTC; 35min ago
      Until: Sat 2025-10-11 03:34:59 UTC; 35min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 4.0K (peak: 564.0K)
        CPU: 5ms
     CGroup: /system.slice/virtsecretd-admin.socket

Oct 11 03:34:59 compute-0 systemd[1]: Starting libvirt secret daemon admin socket...
Oct 11 03:34:59 compute-0 systemd[1]: Listening on libvirt secret daemon admin socket.

● virtsecretd-ro.socket - libvirt secret daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-ro.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-10-11 03:34:59 UTC; 35min ago
      Until: Sat 2025-10-11 03:34:59 UTC; 35min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 4.0K (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtsecretd-ro.socket

Oct 11 03:34:59 compute-0 systemd[1]: Starting libvirt secret daemon read-only socket...
Oct 11 03:34:59 compute-0 systemd[1]: Listening on libvirt secret daemon read-only socket.

● virtsecretd.socket - libvirt secret daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd.socket.d
             └─override.conf
     Active: active (running) since Sat 2025-10-11 03:34:59 UTC; 35min ago
      Until: Sat 2025-10-11 03:34:59 UTC; 35min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48573)
     Memory: 0B (peak: 716.0K)
        CPU: 5ms
     CGroup: /system.slice/virtsecretd.socket

Oct 11 03:34:59 compute-0 systemd[1]: Starting libvirt secret daemon socket...
Oct 11 03:34:59 compute-0 systemd[1]: Listening on libvirt secret daemon socket.

○ virtstoraged-admin.socket - libvirt storage daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-admin-sock (Stream)

○ virtstoraged-ro.socket - libvirt storage daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock-ro (Stream)

○ virtstoraged.socket - libvirt storage daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock (Stream)

● swap.swap - /swap
     Loaded: loaded (/etc/fstab; generated)
     Active: active since Sat 2025-10-11 03:10:51 UTC; 59min ago
      Until: Sat 2025-10-11 03:10:51 UTC; 59min ago
       What: /swap
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● basic.target - Basic System
     Loaded: loaded (/usr/lib/systemd/system/basic.target; static)
     Active: active since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
       Docs: man:systemd.special(7)

Oct 11 02:42:18 localhost systemd[1]: Reached target Basic System.

○ blockdev@dev-disk-by\x2duuid-9839e2e1\x2d98a2\x2d4594\x2db609\x2d79d514deb0a3.target - Block Device Preparation for /dev/disk/by-uuid/9839e2e1-98a2-4594-b609-79d514deb0a3
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ blockdev@dev-vda1.target - Block Device Preparation for /dev/vda1
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● ceph-92cfe4d4-4917-5be1-9d00-73758793a62b.target - Ceph cluster 92cfe4d4-4917-5be1-9d00-73758793a62b
     Loaded: loaded (/etc/systemd/system/ceph-92cfe4d4-4917-5be1-9d00-73758793a62b.target; enabled; preset: disabled)
     Active: active since Sat 2025-10-11 03:18:34 UTC; 52min ago
      Until: Sat 2025-10-11 03:18:34 UTC; 52min ago

Oct 11 03:18:34 compute-0 systemd[1]: Reached target Ceph cluster 92cfe4d4-4917-5be1-9d00-73758793a62b.

● ceph.target - All Ceph clusters and services
     Loaded: loaded (/etc/systemd/system/ceph.target; enabled; preset: disabled)
     Active: active since Sat 2025-10-11 03:18:34 UTC; 52min ago
      Until: Sat 2025-10-11 03:18:34 UTC; 52min ago

Oct 11 03:18:34 compute-0 systemd[1]: Reached target All Ceph clusters and services.

● cloud-config.target - Cloud-config availability
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.target; static)
     Active: active since Sat 2025-10-11 02:42:21 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:21 UTC; 1h 28min ago

Oct 11 02:42:21 np0005480824.novalocal systemd[1]: Reached target Cloud-config availability.

● cloud-init.target - Cloud-init target
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.target; enabled-runtime; preset: disabled)
     Active: active since Sat 2025-10-11 02:42:22 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:22 UTC; 1h 28min ago

Oct 11 02:42:22 np0005480824.novalocal systemd[1]: Reached target Cloud-init target.

○ cryptsetup-pre.target - Local Encrypted Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● cryptsetup.target - Local Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup.target; static)
     Active: active since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:systemd.special(7)

● edpm_libvirt.target
     Loaded: loaded (/etc/systemd/system/edpm_libvirt.target; static)
     Active: active since Sat 2025-10-11 03:35:33 UTC; 35min ago
      Until: Sat 2025-10-11 03:35:33 UTC; 35min ago

Oct 11 03:35:33 compute-0 systemd[1]: Reached target edpm_libvirt.target.

○ emergency.target - Emergency Mode
     Loaded: loaded (/usr/lib/systemd/system/emergency.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ first-boot-complete.target - First Boot Complete
     Loaded: loaded (/usr/lib/systemd/system/first-boot-complete.target; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:systemd.special(7)

Oct 11 02:42:17 localhost systemd[1]: First Boot Complete was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ getty-pre.target - Preparation for Logins
     Loaded: loaded (/usr/lib/systemd/system/getty-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

● getty.target - Login Prompts
     Loaded: loaded (/usr/lib/systemd/system/getty.target; static)
     Active: active since Sat 2025-10-11 02:42:21 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:21 UTC; 1h 28min ago
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

Oct 11 02:42:21 np0005480824.novalocal systemd[1]: Reached target Login Prompts.

○ graphical.target - Graphical Interface
     Loaded: loaded (/usr/lib/systemd/system/graphical.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ initrd-fs.target - Initrd File Systems
     Loaded: loaded (/usr/lib/systemd/system/initrd-fs.target; static)
     Active: inactive (dead) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:systemd.special(7)

Oct 11 02:42:16 localhost systemd[1]: Reached target Initrd File Systems.

○ initrd-root-device.target - Initrd Root Device
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-device.target; static)
     Active: inactive (dead) since Sat 2025-10-11 02:42:16 UTC; 1h 28min ago
       Docs: man:systemd.special(7)

Oct 11 02:42:15 localhost systemd[1]: Reached target Initrd Root Device.
Oct 11 02:42:16 localhost systemd[1]: Stopped target Initrd Root Device.

○ initrd-root-fs.target - Initrd Root File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-fs.target; static)
     Active: inactive (dead) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:systemd.special(7)

Oct 11 02:42:16 localhost systemd[1]: Reached target Initrd Root File System.

○ initrd-switch-root.target - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.target; static)
     Active: inactive (dead) since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago

Oct 11 02:42:16 localhost systemd[1]: Reached target Switch Root.

○ initrd-usr-fs.target - Initrd /usr File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-usr-fs.target; static)
     Active: inactive (dead) since Sat 2025-10-11 02:42:16 UTC; 1h 28min ago
       Docs: man:systemd.special(7)

Oct 11 02:42:16 localhost systemd[1]: Stopped target Initrd /usr File System.
Notice: journal has been rotated since unit was started, output may be incomplete.

○ initrd.target - Initrd Default Target
     Loaded: loaded (/usr/lib/systemd/system/initrd.target; static)
     Active: inactive (dead) since Sat 2025-10-11 02:42:16 UTC; 1h 28min ago
       Docs: man:systemd.special(7)

Oct 11 02:42:16 localhost systemd[1]: Reached target Initrd Default Target.
Oct 11 02:42:16 localhost systemd[1]: Stopped target Initrd Default Target.

● integritysetup.target - Local Integrity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/integritysetup.target; static)
     Active: active since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:systemd.special(7)

● local-fs-pre.target - Preparation for Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs-pre.target; static)
     Active: active since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
       Docs: man:systemd.special(7)

Oct 11 02:42:18 localhost systemd[1]: Reached target Preparation for Local File Systems.

● local-fs.target - Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs.target; static)
     Active: active since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
       Docs: man:systemd.special(7)

Oct 11 02:42:18 localhost systemd[1]: Reached target Local File Systems.

● multi-user.target - Multi-User System
     Loaded: loaded (/usr/lib/systemd/system/multi-user.target; indirect; preset: disabled)
     Active: active since Sat 2025-10-11 02:42:22 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:22 UTC; 1h 28min ago
       Docs: man:systemd.special(7)

Oct 11 02:42:22 np0005480824.novalocal systemd[1]: Reached target Multi-User System.

● network-online.target - Network is Online
     Loaded: loaded (/usr/lib/systemd/system/network-online.target; static)
     Active: active since Sat 2025-10-11 02:42:21 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:21 UTC; 1h 28min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Oct 11 02:42:21 np0005480824.novalocal systemd[1]: Reached target Network is Online.

● network-pre.target - Preparation for Network
     Loaded: loaded (/usr/lib/systemd/system/network-pre.target; static)
     Active: active since Sat 2025-10-11 02:42:19 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:19 UTC; 1h 28min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Oct 11 02:42:19 np0005480824.novalocal systemd[1]: Reached target Preparation for Network.

● network.target - Network
     Loaded: loaded (/usr/lib/systemd/system/network.target; static)
     Active: active since Sat 2025-10-11 02:42:20 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:20 UTC; 1h 28min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Oct 11 02:42:20 np0005480824.novalocal systemd[1]: Reached target Network.

● nfs-client.target - NFS client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-client.target; enabled; preset: disabled)
     Active: active since Sat 2025-10-11 02:42:20 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:20 UTC; 1h 28min ago

Oct 11 02:42:20 np0005480824.novalocal systemd[1]: Reached target NFS client services.

○ nss-lookup.target - Host and Network Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-lookup.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● nss-user-lookup.target - User and Group Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-user-lookup.target; static)
     Active: active since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
       Docs: man:systemd.special(7)

Oct 11 02:42:18 localhost systemd[1]: Reached target User and Group Name Lookups.

● paths.target - Path Units
     Loaded: loaded (/usr/lib/systemd/system/paths.target; static)
     Active: active since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:systemd.special(7)

○ remote-cryptsetup.target - Remote Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-cryptsetup.target; disabled; preset: enabled)
     Active: inactive (dead) since Sat 2025-10-11 02:42:16 UTC; 1h 28min ago
       Docs: man:systemd.special(7)

Oct 11 02:42:15 localhost systemd[1]: Reached target Remote Encrypted Volumes.
Oct 11 02:42:16 localhost systemd[1]: Stopped target Remote Encrypted Volumes.

● remote-fs-pre.target - Preparation for Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs-pre.target; static)
     Active: active since Sat 2025-10-11 02:42:20 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:20 UTC; 1h 28min ago
       Docs: man:systemd.special(7)

Oct 11 02:42:20 np0005480824.novalocal systemd[1]: Reached target Preparation for Remote File Systems.

● remote-fs.target - Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs.target; enabled; preset: enabled)
Unit syslog.target could not be found.
     Active: active since Sat 2025-10-11 02:42:20 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:20 UTC; 1h 28min ago
       Docs: man:systemd.special(7)

Oct 11 02:42:20 np0005480824.novalocal systemd[1]: Reached target Remote File Systems.

○ remote-veritysetup.target - Remote Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-veritysetup.target; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ rescue.target - Rescue Mode
     Loaded: loaded (/usr/lib/systemd/system/rescue.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● rpc_pipefs.target
     Loaded: loaded (/usr/lib/systemd/system/rpc_pipefs.target; static)
     Active: active since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago

● rpcbind.target - RPC Port Mapper
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.target; static)
     Active: active since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:systemd.special(7)

○ shutdown.target - System Shutdown
     Loaded: loaded (/usr/lib/systemd/system/shutdown.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● slices.target - Slice Units
     Loaded: loaded (/usr/lib/systemd/system/slices.target; static)
     Active: active since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:systemd.special(7)

● sockets.target - Socket Units
     Loaded: loaded (/usr/lib/systemd/system/sockets.target; static)
     Active: active since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
       Docs: man:systemd.special(7)

Oct 11 02:42:18 localhost systemd[1]: Reached target Socket Units.

● sshd-keygen.target
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen.target; static)
     Active: active since Sat 2025-10-11 03:33:32 UTC; 37min ago
      Until: Sat 2025-10-11 03:33:32 UTC; 37min ago

Oct 11 03:33:32 compute-0 systemd[1]: Reached target sshd-keygen.target.

● swap.target - Swaps
     Loaded: loaded (/usr/lib/systemd/system/swap.target; static)
     Active: active since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:systemd.special(7)

● sysinit.target - System Initialization
     Loaded: loaded (/usr/lib/systemd/system/sysinit.target; static)
     Active: active since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
       Docs: man:systemd.special(7)

Oct 11 02:42:18 localhost systemd[1]: Reached target System Initialization.

● time-set.target - System Time Set
     Loaded: loaded (/usr/lib/systemd/system/time-set.target; static)
     Active: active since Sat 2025-10-11 03:18:35 UTC; 52min ago
      Until: Sat 2025-10-11 03:18:35 UTC; 52min ago
       Docs: man:systemd.special(7)

Oct 11 03:18:35 compute-0 systemd[1]: Reached target System Time Set.

● time-sync.target - System Time Synchronized
     Loaded: loaded (/usr/lib/systemd/system/time-sync.target; static)
     Active: active since Sat 2025-10-11 03:18:35 UTC; 52min ago
      Until: Sat 2025-10-11 03:18:35 UTC; 52min ago
       Docs: man:systemd.special(7)

Oct 11 03:18:35 compute-0 systemd[1]: Reached target System Time Synchronized.

● timers.target - Timer Units
     Loaded: loaded (/usr/lib/systemd/system/timers.target; static)
     Active: active since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
       Docs: man:systemd.special(7)

Oct 11 02:42:18 localhost systemd[1]: Reached target Timer Units.

○ umount.target - Unmount All Filesystems
     Loaded: loaded (/usr/lib/systemd/system/umount.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ veritysetup-pre.target - Local Verity Protected Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/veritysetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● veritysetup.target - Local Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/veritysetup.target; static)
     Active: active since Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:17 UTC; 1h 28min ago
       Docs: man:systemd.special(7)

○ virt-guest-shutdown.target - Libvirt guests shutdown
     Loaded: loaded (/etc/systemd/system/virt-guest-shutdown.target; static)
     Active: inactive (dead)
       Docs: https://libvirt.org

● 65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2-d9f1cc3a1b07bd0.timer - /usr/bin/podman healthcheck run 65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2
     Loaded: loaded (/run/systemd/transient/65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2-d9f1cc3a1b07bd0.timer; transient)
  Transient: yes
     Active: active (waiting) since Sat 2025-10-11 03:30:02 UTC; 40min ago
      Until: Sat 2025-10-11 03:30:02 UTC; 40min ago
    Trigger: Sat 2025-10-11 04:11:03 UTC; 24s left
   Triggers: ● 65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2-d9f1cc3a1b07bd0.service

Oct 11 03:30:02 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 65c3a3d72e1cba3c83fc771a841564f690b47cc0f5012ce0acf16e2d9f8e3fe2.

● 8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8-5d762caefe58de62.timer - /usr/bin/podman healthcheck run 8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8
     Loaded: loaded (/run/systemd/transient/8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8-5d762caefe58de62.timer; transient)
  Transient: yes
     Active: active (waiting) since Sat 2025-10-11 03:37:25 UTC; 33min ago
      Until: Sat 2025-10-11 03:37:25 UTC; 33min ago
    Trigger: Sat 2025-10-11 04:10:55 UTC; 16s left
   Triggers: ● 8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8-5d762caefe58de62.service

Oct 11 03:37:25 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 8b003d65c8e439e280409825aa37dacfb921ffdd0ada54278b9746654fdc0aa8.

● dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab-6e6beab5453a19ab.timer - /usr/bin/podman healthcheck run dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab
     Loaded: loaded (/run/systemd/transient/dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab-6e6beab5453a19ab.timer; transient)
  Transient: yes
     Active: active (waiting) since Sat 2025-10-11 03:31:08 UTC; 39min ago
      Until: Sat 2025-10-11 03:31:08 UTC; 39min ago
    Trigger: Sat 2025-10-11 04:11:08 UTC; 29s left
   Triggers: ● dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab-6e6beab5453a19ab.service

Oct 11 03:31:08 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run dd5285a58cbe29a90687a00af14b934b599bb4de55df5857e4d7b7ac9f22feab.

● dnf-makecache.timer - dnf makecache --timer
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.timer; enabled; preset: enabled)
     Active: active (waiting) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
    Trigger: Sat 2025-10-11 04:53:27 UTC; 42min left
   Triggers: ● dnf-makecache.service

Oct 11 02:42:18 localhost systemd[1]: Started dnf makecache --timer.

● f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d-38b9144b98f6cb45.timer - /usr/bin/podman healthcheck run f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d
     Loaded: loaded (/run/systemd/transient/f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d-38b9144b98f6cb45.timer; transient)
  Transient: yes
     Active: active (waiting) since Sat 2025-10-11 03:36:30 UTC; 34min ago
      Until: Sat 2025-10-11 03:36:30 UTC; 34min ago
    Trigger: Sat 2025-10-11 04:10:55 UTC; 16s left
   Triggers: ● f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d-38b9144b98f6cb45.service

Oct 11 03:36:30 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run f2b19cad22d0fbdd185b264c3c8b6443d09a02319dc9fb0585dc69a0f24a758d.

● logrotate.timer - Daily rotation of log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.timer; enabled; preset: enabled)
     Active: active (waiting) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
    Trigger: Sun 2025-10-12 00:00:00 UTC; 19h left
   Triggers: ● logrotate.service
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

Oct 11 02:42:18 localhost systemd[1]: Started Daily rotation of log files.

○ raid-check.timer - Weekly RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.timer; enabled; preset: enabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● raid-check.service

○ sysstat-collect.timer - Run system activity accounting tool every 10 minutes
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-collect.service

○ sysstat-summary.timer - Generate summary of yesterday's process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-summary.service

● systemd-tmpfiles-clean.timer - Daily Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.timer; static)
     Active: active (waiting) since Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
      Until: Sat 2025-10-11 02:42:18 UTC; 1h 28min ago
    Trigger: Sun 2025-10-12 02:57:22 UTC; 22h left
   Triggers: ● systemd-tmpfiles-clean.service
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)

Oct 11 02:42:18 localhost systemd[1]: Started Daily Cleanup of Temporary Directories.

● unbound-anchor.timer - daily update of the root trust anchor for DNSSEC
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.timer; enabled; preset: enabled)
     Active: active (waiting) since Sat 2025-10-11 03:11:22 UTC; 59min ago
      Until: Sat 2025-10-11 03:11:22 UTC; 59min ago
    Trigger: Sun 2025-10-12 00:00:00 UTC; 19h left
   Triggers: ● unbound-anchor.service
       Docs: man:unbound-anchor(8)

Oct 11 03:11:22 compute-0 systemd[1]: Started daily update of the root trust anchor for DNSSEC.
