● compute-0
    State: running
    Units: 477 loaded (incl. loaded aliases)
     Jobs: 0 queued
   Failed: 0 units
    Since: Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
  systemd: 252-59.el9
   CGroup: /
           ├─268565 turbostat --debug sleep 10
           ├─268568 sleep 10
           ├─init.scope
           │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
           ├─machine.slice
           │ ├─libpod-0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470.scope
           │ │ └─container
           │ │   ├─145423 dumb-init --single-child -- kolla_start
           │ │   └─145426 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
           │ ├─libpod-84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2.scope
           │ │ └─container
           │ │   ├─225740 dumb-init --single-child -- kolla_start
           │ │   └─225743 /usr/sbin/multipathd -d
           │ ├─libpod-9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce.scope
           │ │ └─container
           │ │   ├─243454 dumb-init --single-child -- kolla_start
           │ │   └─243461 /usr/bin/python3 /usr/bin/nova-compute
           │ └─libpod-fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692.scope
           │   └─container
           │     ├─155088 dumb-init --single-child -- kolla_start
           │     ├─155091 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │     ├─155210 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │     └─155215 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp2hzlk811/privsep.sock
           ├─system.slice
           │ ├─NetworkManager.service
           │ │ └─49021 /usr/sbin/NetworkManager --no-daemon
           │ ├─auditd.service
           │ │ ├─700 /sbin/auditd
           │ │ └─702 /usr/sbin/sedispatch
           │ ├─chronyd.service
           │ │ └─58592 /usr/sbin/chronyd -F 2
           │ ├─crond.service
           │ │ └─1008 /usr/sbin/crond -n
           │ ├─dbus-broker.service
           │ │ ├─750 /usr/bin/dbus-broker-launch --scope system --audit
           │ │ └─772 dbus-broker --log 4 --controller 9 --machine-id 4d4ef2323cc3337bbfd9081b2a323b4e --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
           │ ├─edpm_multipathd.service
           │ │ └─225738 /usr/bin/conmon --api-version 1 -c 84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2 -u 84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2/userdata -p /run/containers/storage/overlay-containers/84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2/userdata/pidfile -n multipathd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2/userdata/oci-log --conmon-pidfile /run/multipathd.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2
           │ ├─edpm_nova_compute.service
           │ │ └─243452 /usr/bin/conmon --api-version 1 -c 9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce -u 9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce/userdata -p /run/containers/storage/overlay-containers/9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce
           │ ├─edpm_ovn_controller.service
           │ │ └─145421 /usr/bin/conmon --api-version 1 -c 0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470 -u 0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470/userdata -p /run/containers/storage/overlay-containers/0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470
           │ ├─edpm_ovn_metadata_agent.service
           │ │ └─155086 /usr/bin/conmon --api-version 1 -c fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692 -u fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692/userdata -p /run/containers/storage/overlay-containers/fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692
           │ ├─gssproxy.service
           │ │ └─869 /usr/sbin/gssproxy -D
           │ ├─irqbalance.service
           │ │ └─780 /usr/sbin/irqbalance
           │ ├─iscsid.service
           │ │ └─228459 /usr/sbin/iscsid -f
           │ ├─ovs-vswitchd.service
           │ │ └─47325 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
           │ ├─ovsdb-server.service
           │ │ └─47243 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
           │ ├─polkit.service
           │ │ └─43504 /usr/lib/polkit-1/polkitd --no-debug
           │ ├─rpcbind.service
           │ │ └─698 /usr/bin/rpcbind -w -f
           │ ├─rsyslog.service
           │ │ └─1004 /usr/sbin/rsyslogd -n
           │ ├─sshd.service
           │ │ └─181036 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
           │ ├─system-ceph\x2d67f67f44\x2d54fc\x2d54ea\x2d8df0\x2d10931b6ecdaf.slice
           │ │ ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@crash.compute-0.service
           │ │ │ ├─libpod-payload-1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973
           │ │ │ │ ├─80274 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
           │ │ │ │ └─80276 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
           │ │ │ └─runtime
           │ │ │   └─80272 /usr/bin/conmon --api-version 1 -c 1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973 -u 1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973/userdata -p /run/containers/storage/overlay-containers/1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973
           │ │ ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mds.cephfs.compute-0.izecis.service
           │ │ │ ├─libpod-payload-63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474
           │ │ │ │ ├─95394 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.izecis -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─95396 /usr/bin/ceph-mds -n mds.cephfs.compute-0.izecis -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─95392 /usr/bin/conmon --api-version 1 -c 63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474 -u 63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474/userdata -p /run/containers/storage/overlay-containers/63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-mds-cephfs-compute-0-izecis --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mds.cephfs.compute-0.izecis.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474
           │ │ ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mgr.compute-0.ysegzv.service
           │ │ │ ├─libpod-payload-f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662
           │ │ │ │ ├─75513 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.ysegzv -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─75515 /usr/bin/ceph-mgr -n mgr.compute-0.ysegzv -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─75511 /usr/bin/conmon --api-version 1 -c f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662 -u f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662/userdata -p /run/containers/storage/overlay-containers/f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-mgr-compute-0-ysegzv --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mgr.compute-0.ysegzv.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662
           │ │ ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mon.compute-0.service
           │ │ │ ├─libpod-payload-9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6
           │ │ │ │ ├─75220 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ │ └─75222 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─75218 /usr/bin/conmon --api-version 1 -c 9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6 -u 9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6/userdata -p /run/containers/storage/overlay-containers/9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6
           │ │ ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.0.service
           │ │ │ ├─libpod-payload-012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1
           │ │ │ │ ├─86011 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
           │ │ │ │ └─86013 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
           │ │ │ └─runtime
           │ │ │   └─86009 /usr/bin/conmon --api-version 1 -c 012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1 -u 012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1/userdata -p /run/containers/storage/overlay-containers/012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1
           │ │ ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.1.service
           │ │ │ ├─libpod-payload-187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a
           │ │ │ │ ├─87053 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
           │ │ │ │ └─87055 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
           │ │ │ └─runtime
           │ │ │   └─87051 /usr/bin/conmon --api-version 1 -c 187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a -u 187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a/userdata -p /run/containers/storage/overlay-containers/187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a
           │ │ ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.2.service
           │ │ │ ├─libpod-payload-2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78
           │ │ │ │ ├─88097 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
           │ │ │ │ └─88099 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
           │ │ │ └─runtime
           │ │ │   └─88095 /usr/bin/conmon --api-version 1 -c 2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78 -u 2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78/userdata -p /run/containers/storage/overlay-containers/2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78
           │ │ └─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@rgw.rgw.compute-0.efuxpz.service
           │ │   ├─libpod-payload-7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d
           │ │   │ ├─94931 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.efuxpz -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
           │ │   │ └─94933 /usr/bin/radosgw -n client.rgw.rgw.compute-0.efuxpz -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
           │ │   └─runtime
           │ │     └─94929 /usr/bin/conmon --api-version 1 -c 7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d -u 7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d/userdata -p /run/containers/storage/overlay-containers/7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-rgw-rgw-compute-0-efuxpz --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@rgw.rgw.compute-0.efuxpz.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d
           │ ├─system-getty.slice
           │ │ └─getty@tty1.service
           │ │   └─1009 /sbin/agetty -o "-p -- \\u" --noclear - linux
           │ ├─system-serial\x2dgetty.slice
           │ │ └─serial-getty@ttyS0.service
           │ │   └─1010 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
           │ ├─systemd-hostnamed.service
           │ │ └─265996 /usr/lib/systemd/systemd-hostnamed
           │ ├─systemd-journald.service
           │ │ └─675 /usr/lib/systemd/systemd-journald
           │ ├─systemd-logind.service
           │ │ └─786 /usr/lib/systemd/systemd-logind
           │ ├─systemd-machined.service
           │ │ └─206383 /usr/lib/systemd/systemd-machined
           │ ├─systemd-udevd.service
           │ │ └─udev
           │ │   └─728 /usr/lib/systemd/systemd-udevd
           │ ├─tuned.service
           │ │ └─105736 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
           │ ├─virtlogd.service
           │ │ └─205752 /usr/sbin/virtlogd
           │ ├─virtnodedevd.service
           │ │ └─244229 /usr/sbin/virtnodedevd --timeout 120
           │ └─virtqemud.service
           │   └─243015 /usr/sbin/virtqemud --timeout 120
           └─user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4551 /usr/bin/python3
             │ ├─session-56.scope
             │ │ ├─262891 "sshd-session: zuul [priv]"
             │ │ ├─262913 "sshd-session: zuul@notty"
             │ │ ├─262914 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─262938 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─268564 timeout 15s turbostat --debug sleep 10
             │ │ ├─269056 timeout 300s ceph osd df tree --format json-pretty
             │ │ ├─269058 /usr/bin/python3 -s /usr/bin/ceph osd df tree --format json-pretty
             │ │ ├─269106 timeout 300s tuned-adm recommend
             │ │ ├─269107 /usr/bin/python3 -Es /usr/sbin/tuned-adm recommend
             │ │ ├─269108 timeout 300s systemctl status --all
             │ │ └─269111 systemctl status --all
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─12153 /usr/bin/dbus-broker-launch --scope user
             │   │   └─12182 dbus-broker --log 4 --controller 9 --machine-id 4d4ef2323cc3337bbfd9081b2a323b4e --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4339 /usr/lib/systemd/systemd --user
             │   │ └─4342 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-305cba0a.scope
             │       └─12094 catatonit -P
             └─user-42477.slice
               ├─session-21.scope
               │ ├─76647 "sshd-session: ceph-admin [priv]"
               │ └─76670 "sshd-session: ceph-admin"
               ├─session-23.scope
               │ ├─76665 "sshd-session: ceph-admin [priv]"
               │ └─76671 "sshd-session: ceph-admin@notty"
               ├─session-24.scope
               │ ├─76697 "sshd-session: ceph-admin [priv]"
               │ └─76700 "sshd-session: ceph-admin@notty"
               ├─session-25.scope
               │ ├─76726 "sshd-session: ceph-admin [priv]"
               │ └─76729 "sshd-session: ceph-admin@notty"
               ├─session-26.scope
               │ ├─76755 "sshd-session: ceph-admin [priv]"
               │ └─76758 "sshd-session: ceph-admin@notty"
               ├─session-27.scope
               │ ├─76784 "sshd-session: ceph-admin [priv]"
               │ └─76787 "sshd-session: ceph-admin@notty"
               ├─session-28.scope
               │ ├─76813 "sshd-session: ceph-admin [priv]"
               │ └─76816 "sshd-session: ceph-admin@notty"
               ├─session-29.scope
               │ ├─76842 "sshd-session: ceph-admin [priv]"
               │ └─76845 "sshd-session: ceph-admin@notty"
               ├─session-30.scope
               │ ├─76871 "sshd-session: ceph-admin [priv]"
Unit boot.automount could not be found.
                │ └─76874 "sshd-session: ceph-admin@notty"
               ├─session-31.scope
               │ ├─76900 "sshd-session: ceph-admin [priv]"
               │ └─76903 "sshd-session: ceph-admin@notty"
               ├─session-32.scope
               │ ├─76927 "sshd-session: ceph-admin [priv]"
               │ └─76930 "sshd-session: ceph-admin@notty"
               ├─session-33.scope
                │ ├─76956 "sshd-session: ceph-admin [priv]"
                │ ├─76959 "sshd-session: ceph-admin@notty"
               │ ├─269057 sudo /bin/python3 /var/lib/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86 --timeout 895 ceph-volume --fsid 67f67f44-54fc-54ea-8df0-10931b6ecdaf --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --objectstore bluestore --yes --no-systemd
               │ ├─269082 /bin/python3 /var/lib/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86 --timeout 895 ceph-volume --fsid 67f67f44-54fc-54ea-8df0-10931b6ecdaf --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --objectstore bluestore --yes --no-systemd
               │ └─269109 /bin/podman version --format {{.Client.Version}}
               └─user@42477.service
                 └─init.scope
                   ├─76651 /usr/lib/systemd/systemd --user
                   └─76654 "(sd-pam)"

● proc-sys-fs-binfmt_misc.automount - Arbitrary Executable File Formats File System Automount Point
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.automount; static)
     Active: active (running) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
   Triggers: ● proc-sys-fs-binfmt_misc.mount
      Where: /proc/sys/fs/binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Dec 09 16:03:41 compute-0 systemd[1]: proc-sys-fs-binfmt_misc.automount: Got automount request for /proc/sys/fs/binfmt_misc, triggered by 77727 (sysctl)

● dev-cdrom.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ceph_vg0-ceph_lv0.device - /dev/ceph_vg0/ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:05 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:05 UTC; 42min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-ceph_vg1-ceph_lv1.device - /dev/ceph_vg1/ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:10 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:10 UTC; 42min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-ceph_vg2-ceph_lv2.device - /dev/ceph_vg2/ceph_lv2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:15 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:15 UTC; 42min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2ddiskseq-1.device - /dev/disk/by-diskseq/1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2ddiskseq-11.device - /dev/disk/by-diskseq/11
    Follows: unit currently follows state of sys-devices-virtual-block-loop5.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:14 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:14 UTC; 42min ago
     Device: /sys/devices/virtual/block/loop5

● dev-disk-by\x2ddiskseq-12.device - /dev/disk/by-diskseq/12
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:15 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:15 UTC; 42min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2ddiskseq-3.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2ddiskseq-5.device - /dev/disk/by-diskseq/5
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:04 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:04 UTC; 42min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2ddiskseq-6.device - /dev/disk/by-diskseq/6
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:05 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:05 UTC; 42min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2ddiskseq-8.device - /dev/disk/by-diskseq/8
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:10 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:10 UTC; 42min ago
     Device: /sys/devices/virtual/block/loop4

● dev-disk-by\x2ddiskseq-9.device - /dev/disk/by-diskseq/9
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:10 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:10 UTC; 42min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-ata\x2dQEMU_DVD\x2dROM_QM00001.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg0\x2dceph_lv0.device - /dev/disk/by-id/dm-name-ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:05 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:05 UTC; 42min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg1\x2dceph_lv1.device - /dev/disk/by-id/dm-name-ceph_vg1-ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:10 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:10 UTC; 42min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg2\x2dceph_lv2.device - /dev/disk/by-id/dm-name-ceph_vg2-ceph_lv2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:15 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:15 UTC; 42min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2dJAkaBzaScWjdQzKLiPCf1xqZ0AjLO4ydBaHDugxY6Yaff1E3cGA9v7kNodIZev1q.device - /dev/disk/by-id/dm-uuid-LVM-JAkaBzaScWjdQzKLiPCf1xqZ0AjLO4ydBaHDugxY6Yaff1E3cGA9v7kNodIZev1q
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:05 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:05 UTC; 42min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2doQhCeYkOegPYysBwlVdYAgUtBkTyFBZYOaW9QxBdTYxaK4NxSTyWYVT3eCC7DH8o.device - /dev/disk/by-id/dm-uuid-LVM-oQhCeYkOegPYysBwlVdYAgUtBkTyFBZYOaW9QxBdTYxaK4NxSTyWYVT3eCC7DH8o
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:15 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:15 UTC; 42min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2dQjvAuwG2pQD8HrdUjWakSJFF41phfeaT6BCrYg2FCXcV6a5YLOhRwyu4Q74Y6mtP.device - /dev/disk/by-id/dm-uuid-LVM-QjvAuwG2pQD8HrdUjWakSJFF41phfeaT6BCrYg2FCXcV6a5YLOhRwyu4Q74Y6mtP
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:10 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:10 UTC; 42min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dLVgbIu\x2dSXky\x2dqGHx\x2dSj1j\x2dwhKh\x2dgaqC\x2dKzCY8l.device - /dev/disk/by-id/lvm-pv-uuid-LVgbIu-SXky-qGHx-Sj1j-whKh-gaqC-KzCY8l
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:04 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:04 UTC; 42min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2drY8kk7\x2dldOn\x2dQkVR\x2dveYs\x2dxlTb\x2dkP2k\x2dLneLJe.device - /dev/disk/by-id/lvm-pv-uuid-rY8kk7-ldOn-QkVR-veYs-xlTb-kP2k-LneLJe
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:10 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:10 UTC; 42min ago
     Device: /sys/devices/virtual/block/loop4

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dTOiVcW\x2dDoz9\x2dfNUo\x2drfF4\x2dOFWX\x2dx2Fs\x2dL9DCR5.device - /dev/disk/by-id/lvm-pv-uuid-TOiVcW-Doz9-fNUo-rfF4-OFWX-x2Fs-L9DCR5
    Follows: unit currently follows state of sys-devices-virtual-block-loop5.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:15 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:15 UTC; 42min ago
     Device: /sys/devices/virtual/block/loop5

● dev-disk-by\x2dlabel-config\x2d2.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpartuuid-eedec978\x2d01.device - /dev/disk/by-partuuid/eedec978-01
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0.device - /dev/disk/by-path/pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0.device - /dev/disk/by-path/virtio-pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/virtio-pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2duuid-2025\x2d12\x2d09\x2d14\x2d51\x2d23\x2d00.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2duuid-fcf6b761\x2d831a\x2d48a7\x2d9f5f\x2d068b5063763f.device - /dev/disk/by-uuid/fcf6b761-831a-48a7-9f5f-068b5063763f
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

Dec 09 14:51:35 localhost systemd[1]: Found device /dev/disk/by-uuid/fcf6b761-831a-48a7-9f5f-068b5063763f.

● dev-dm\x2d0.device - /dev/dm-0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:05 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:05 UTC; 42min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-dm\x2d1.device - /dev/dm-1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:10 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:10 UTC; 42min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-dm\x2d2.device - /dev/dm-2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:15 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:15 UTC; 42min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-loop3.device - /dev/loop3
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:04 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:04 UTC; 42min ago
     Device: /sys/devices/virtual/block/loop3

● dev-loop4.device - /dev/loop4
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:10 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:10 UTC; 42min ago
     Device: /sys/devices/virtual/block/loop4

● dev-loop5.device - /dev/loop5
    Follows: unit currently follows state of sys-devices-virtual-block-loop5.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:14 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:14 UTC; 42min ago
     Device: /sys/devices/virtual/block/loop5

● dev-mapper-ceph_vg0\x2dceph_lv0.device - /dev/mapper/ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:05 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:05 UTC; 42min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-mapper-ceph_vg1\x2dceph_lv1.device - /dev/mapper/ceph_vg1-ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:10 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:10 UTC; 42min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-mapper-ceph_vg2\x2dceph_lv2.device - /dev/mapper/ceph_vg2-ceph_lv2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:15 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:15 UTC; 42min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-rfkill.device - /dev/rfkill
    Follows: unit currently follows state of sys-devices-virtual-misc-rfkill.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:38 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:38 UTC; 1h 53min ago
     Device: /sys/devices/virtual/misc/rfkill

● dev-sr0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ttyS0.device - /dev/ttyS0
    Follows: unit currently follows state of sys-devices-pnp0-00:00-tty-ttyS0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

Dec 09 14:51:37 localhost systemd[1]: Condition check resulted in /dev/ttyS0 being skipped.

● dev-ttyS1.device - /dev/ttyS1
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● dev-ttyS2.device - /dev/ttyS2
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS2.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● dev-ttyS3.device - /dev/ttyS3
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS3.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● dev-vda.device - /dev/vda
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-vda1.device - /dev/vda1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device - QEMU_DVD-ROM config-2
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● sys-devices-pci0000:00-0000:00:03.0-virtio1-net-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● sys-devices-pci0000:00-0000:00:07.0-virtio5-net-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 15:29:33 UTC; 1h 15min ago
      Until: Tue 2025-12-09 15:29:33 UTC; 1h 15min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-devices-platform-serial8250-tty-ttyS1.device - /sys/devices/platform/serial8250/tty/ttyS1
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● sys-devices-platform-serial8250-tty-ttyS2.device - /sys/devices/platform/serial8250/tty/ttyS2
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● sys-devices-platform-serial8250-tty-ttyS3.device - /sys/devices/platform/serial8250/tty/ttyS3
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● sys-devices-pnp0-00:00-tty-ttyS0.device - /sys/devices/pnp0/00:00/tty/ttyS0
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

● sys-devices-virtual-block-dm\x2d0.device - /sys/devices/virtual/block/dm-0
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:05 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:05 UTC; 42min ago
     Device: /sys/devices/virtual/block/dm-0

● sys-devices-virtual-block-dm\x2d1.device - /sys/devices/virtual/block/dm-1
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:10 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:10 UTC; 42min ago
     Device: /sys/devices/virtual/block/dm-1

● sys-devices-virtual-block-dm\x2d2.device - /sys/devices/virtual/block/dm-2
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:15 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:15 UTC; 42min ago
     Device: /sys/devices/virtual/block/dm-2

● sys-devices-virtual-block-loop3.device - /sys/devices/virtual/block/loop3
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:04 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:04 UTC; 42min ago
     Device: /sys/devices/virtual/block/loop3

● sys-devices-virtual-block-loop4.device - /sys/devices/virtual/block/loop4
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:10 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:10 UTC; 42min ago
     Device: /sys/devices/virtual/block/loop4

● sys-devices-virtual-block-loop5.device - /sys/devices/virtual/block/loop5
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:02:14 UTC; 42min ago
      Until: Tue 2025-12-09 16:02:14 UTC; 42min ago
     Device: /sys/devices/virtual/block/loop5

● sys-devices-virtual-misc-rfkill.device - /sys/devices/virtual/misc/rfkill
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:38 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:38 UTC; 1h 53min ago
     Device: /sys/devices/virtual/misc/rfkill

● sys-devices-virtual-net-br\x2dex.device - /sys/devices/virtual/net/br-ex
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 15:59:04 UTC; 45min ago
      Until: Tue 2025-12-09 15:59:04 UTC; 45min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-devices-virtual-net-br\x2dint.device - /sys/devices/virtual/net/br-int
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:13:10 UTC; 31min ago
      Until: Tue 2025-12-09 16:13:10 UTC; 31min ago
     Device: /sys/devices/virtual/net/br-int

● sys-devices-virtual-net-genev_sys_6081.device - /sys/devices/virtual/net/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:13:10 UTC; 31min ago
      Until: Tue 2025-12-09 16:13:10 UTC; 31min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-devices-virtual-net-ovs\x2dsystem.device - /sys/devices/virtual/net/ovs-system
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 15:59:04 UTC; 45min ago
      Until: Tue 2025-12-09 15:59:04 UTC; 45min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-devices-virtual-net-vlan20.device - /sys/devices/virtual/net/vlan20
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 15:59:04 UTC; 45min ago
      Until: Tue 2025-12-09 15:59:04 UTC; 45min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-devices-virtual-net-vlan21.device - /sys/devices/virtual/net/vlan21
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 15:59:04 UTC; 45min ago
      Until: Tue 2025-12-09 15:59:04 UTC; 45min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-devices-virtual-net-vlan22.device - /sys/devices/virtual/net/vlan22
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 15:59:04 UTC; 45min ago
      Until: Tue 2025-12-09 15:59:04 UTC; 45min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-devices-virtual-net-vlan23.device - /sys/devices/virtual/net/vlan23
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 15:59:04 UTC; 45min ago
      Until: Tue 2025-12-09 15:59:04 UTC; 45min ago
Unit boot.mount could not be found.
     Device: /sys/devices/virtual/net/vlan23

● sys-module-configfs.device - /sys/module/configfs
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/module/configfs

● sys-module-fuse.device - /sys/module/fuse
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/module/fuse

● sys-subsystem-net-devices-br\x2dex.device - /sys/subsystem/net/devices/br-ex
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 15:59:04 UTC; 45min ago
      Until: Tue 2025-12-09 15:59:04 UTC; 45min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-subsystem-net-devices-br\x2dint.device - /sys/subsystem/net/devices/br-int
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:13:10 UTC; 31min ago
      Until: Tue 2025-12-09 16:13:10 UTC; 31min ago
     Device: /sys/devices/virtual/net/br-int

● sys-subsystem-net-devices-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-subsystem-net-devices-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 15:29:33 UTC; 1h 15min ago
      Until: Tue 2025-12-09 15:29:33 UTC; 1h 15min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-subsystem-net-devices-genev_sys_6081.device - /sys/subsystem/net/devices/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 16:13:10 UTC; 31min ago
      Until: Tue 2025-12-09 16:13:10 UTC; 31min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-subsystem-net-devices-ovs\x2dsystem.device - /sys/subsystem/net/devices/ovs-system
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 15:59:04 UTC; 45min ago
      Until: Tue 2025-12-09 15:59:04 UTC; 45min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-subsystem-net-devices-vlan20.device - /sys/subsystem/net/devices/vlan20
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 15:59:04 UTC; 45min ago
      Until: Tue 2025-12-09 15:59:04 UTC; 45min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-subsystem-net-devices-vlan21.device - /sys/subsystem/net/devices/vlan21
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 15:59:04 UTC; 45min ago
      Until: Tue 2025-12-09 15:59:04 UTC; 45min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-subsystem-net-devices-vlan22.device - /sys/subsystem/net/devices/vlan22
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 15:59:04 UTC; 45min ago
      Until: Tue 2025-12-09 15:59:04 UTC; 45min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-subsystem-net-devices-vlan23.device - /sys/subsystem/net/devices/vlan23
     Loaded: loaded
     Active: active (plugged) since Tue 2025-12-09 15:59:04 UTC; 45min ago
      Until: Tue 2025-12-09 15:59:04 UTC; 45min ago
     Device: /sys/devices/virtual/net/vlan23

● -.mount - Root Mount
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted)
      Where: /
       What: /dev/vda1
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages.mount - Huge Pages File System
     Loaded: loaded (/usr/lib/systemd/system/dev-hugepages.mount; static)
     Active: active (mounted) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Where: /dev/hugepages
       What: hugetlbfs
       Docs: https://docs.kernel.org/admin-guide/mm/hugetlbpage.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 52.0K (peak: 560.0K)
Unit home.mount could not be found.
        CPU: 6ms
     CGroup: /dev-hugepages.mount

● dev-hugepages1G.mount - /dev/hugepages1G
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Tue 2025-12-09 16:01:06 UTC; 43min ago
      Until: Tue 2025-12-09 16:01:06 UTC; 43min ago
      Where: /dev/hugepages1G
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages2M.mount - /dev/hugepages2M
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Tue 2025-12-09 16:01:07 UTC; 43min ago
      Until: Tue 2025-12-09 16:01:07 UTC; 43min ago
      Where: /dev/hugepages2M
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-mqueue.mount - POSIX Message Queue File System
     Loaded: loaded (/usr/lib/systemd/system/dev-mqueue.mount; static)
     Active: active (mounted) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Where: /dev/mqueue
       What: mqueue
       Docs: man:mq_overview(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 4.0K (peak: 540.0K)
        CPU: 3ms
     CGroup: /dev-mqueue.mount

Dec 09 14:51:37 localhost systemd[1]: Mounted POSIX Message Queue File System.

○ proc-fs-nfsd.mount - NFSD configuration filesystem
     Loaded: loaded (/usr/lib/systemd/system/proc-fs-nfsd.mount; static)
     Active: inactive (dead)
      Where: /proc/fs/nfsd
       What: nfsd

● proc-sys-fs-binfmt_misc.mount - Arbitrary Executable File Formats File System
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.mount; disabled; preset: disabled)
     Active: active (mounted) since Tue 2025-12-09 16:03:41 UTC; 41min ago
      Until: Tue 2025-12-09 16:03:41 UTC; 41min ago
TriggeredBy: ● proc-sys-fs-binfmt_misc.automount
      Where: /proc/sys/fs/binfmt_misc
       What: binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 8.0K (peak: 552.0K)
        CPU: 6ms
     CGroup: /proc-sys-fs-binfmt_misc.mount

Dec 09 16:03:41 compute-0 systemd[1]: Mounting Arbitrary Executable File Formats File System...
Dec 09 16:03:41 compute-0 systemd[1]: Mounted Arbitrary Executable File Formats File System.

● run-credentials-systemd\x2dsysctl.service.mount - /run/credentials/systemd-sysctl.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 15:57:56 UTC; 46min ago
      Until: Tue 2025-12-09 15:57:56 UTC; 46min ago
      Where: /run/credentials/systemd-sysctl.service
       What: none

● run-credentials-systemd\x2dsysusers.service.mount - /run/credentials/systemd-sysusers.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Where: /run/credentials/systemd-sysusers.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup.service.mount - /run/credentials/systemd-tmpfiles-setup.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Where: /run/credentials/systemd-tmpfiles-setup.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup\x2ddev.service.mount - /run/credentials/systemd-tmpfiles-setup-dev.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Where: /run/credentials/systemd-tmpfiles-setup-dev.service
       What: none

● run-netns.mount - /run/netns
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 16:00:35 UTC; 44min ago
      Until: Tue 2025-12-09 16:00:35 UTC; 44min ago
Unit sysroot.mount could not be found.
      Where: /run/netns
       What: tmpfs

● run-user-1000.mount - /run/user/1000
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 15:27:58 UTC; 1h 16min ago
      Until: Tue 2025-12-09 15:27:58 UTC; 1h 16min ago
      Where: /run/user/1000
       What: tmpfs

● run-user-42477.mount - /run/user/42477
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 16:03:32 UTC; 41min ago
      Until: Tue 2025-12-09 16:03:32 UTC; 41min ago
      Where: /run/user/42477
       What: tmpfs

● sys-fs-fuse-connections.mount - FUSE Control File System
     Loaded: loaded (/usr/lib/systemd/system/sys-fs-fuse-connections.mount; static)
     Active: active (mounted) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Where: /sys/fs/fuse/connections
       What: fusectl
       Docs: https://docs.kernel.org/filesystems/fuse.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 4.0K (peak: 540.0K)
        CPU: 3ms
     CGroup: /sys-fs-fuse-connections.mount

Dec 09 14:51:37 localhost systemd[1]: Mounting FUSE Control File System...
Dec 09 14:51:37 localhost systemd[1]: Mounted FUSE Control File System.

● sys-kernel-config.mount - Kernel Configuration File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
      Where: /sys/kernel/config
       What: configfs
       Docs: https://docs.kernel.org/filesystems/configfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● sys-kernel-debug-tracing.mount - /sys/kernel/debug/tracing
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 16:44:17 UTC; 38s ago
      Until: Tue 2025-12-09 16:44:17 UTC; 38s ago
      Where: /sys/kernel/debug/tracing
       What: tracefs

● sys-kernel-debug.mount - Kernel Debug File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-debug.mount; static)
     Active: active (mounted) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Where: /sys/kernel/debug
       What: debugfs
       Docs: https://docs.kernel.org/filesystems/debugfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 4.0K (peak: 540.0K)
        CPU: 3ms
     CGroup: /sys-kernel-debug.mount

Dec 09 14:51:37 localhost systemd[1]: Mounted Kernel Debug File System.

● sys-kernel-tracing.mount - Kernel Trace File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-tracing.mount; static)
     Active: active (mounted) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Where: /sys/kernel/tracing
       What: tracefs
       Docs: https://docs.kernel.org/trace/ftrace.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 4.0K (peak: 540.0K)
        CPU: 2ms
     CGroup: /sys-kernel-tracing.mount

Dec 09 14:51:37 localhost systemd[1]: Mounted Kernel Trace File System.

○ tmp.mount - Temporary Directory /tmp
     Loaded: loaded (/usr/lib/systemd/system/tmp.mount; disabled; preset: disabled)
     Active: inactive (dead)
      Where: /tmp
       What: tmpfs
       Docs: https://systemd.io/TEMPORARY_DIRECTORIES
             man:file-hierarchy(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● var-lib-containers-storage-overlay-0425d596bb7daad543f9d5011c58c09bcb5d964e2cdbbdbfcaf1ccef172b7cf6-merged.mount - /var/lib/containers/storage/overlay/0425d596bb7daad543f9d5011c58c09bcb5d964e2cdbbdbfcaf1ccef172b7cf6/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 16:21:49 UTC; 23min ago
      Until: Tue 2025-12-09 16:21:49 UTC; 23min ago
      Where: /var/lib/containers/storage/overlay/0425d596bb7daad543f9d5011c58c09bcb5d964e2cdbbdbfcaf1ccef172b7cf6/merged
       What: overlay

● var-lib-containers-storage-overlay-14a8b433bff4c349f902ba21e1dfd966e575379a119782d3abbca749ebd42cd4-merged.mount - /var/lib/containers/storage/overlay/14a8b433bff4c349f902ba21e1dfd966e575379a119782d3abbca749ebd42cd4/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 16:20:03 UTC; 24min ago
      Until: Tue 2025-12-09 16:20:03 UTC; 24min ago
      Where: /var/lib/containers/storage/overlay/14a8b433bff4c349f902ba21e1dfd966e575379a119782d3abbca749ebd42cd4/merged
       What: overlay

● var-lib-containers-storage-overlay-34790271d93ab40782c77266edb9237153122730b6bcd4d08ea44b0952275156-merged.mount - /var/lib/containers/storage/overlay/34790271d93ab40782c77266edb9237153122730b6bcd4d08ea44b0952275156/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 16:05:06 UTC; 39min ago
      Until: Tue 2025-12-09 16:05:06 UTC; 39min ago
      Where: /var/lib/containers/storage/overlay/34790271d93ab40782c77266edb9237153122730b6bcd4d08ea44b0952275156/merged
       What: overlay

● var-lib-containers-storage-overlay-3842b0bbc551dc8639734ff18d73578e37839cd9d0ce8e86f7ac3e0dac9f749c-merged.mount - /var/lib/containers/storage/overlay/3842b0bbc551dc8639734ff18d73578e37839cd9d0ce8e86f7ac3e0dac9f749c/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 16:14:15 UTC; 30min ago
      Until: Tue 2025-12-09 16:14:15 UTC; 30min ago
      Where: /var/lib/containers/storage/overlay/3842b0bbc551dc8639734ff18d73578e37839cd9d0ce8e86f7ac3e0dac9f749c/merged
       What: overlay

● var-lib-containers-storage-overlay-8596c538902bc4535dff82b7a8a22981e72d70123b2e43b209e873dedc5199d1-merged.mount - /var/lib/containers/storage/overlay/8596c538902bc4535dff82b7a8a22981e72d70123b2e43b209e873dedc5199d1/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 16:04:16 UTC; 40min ago
      Until: Tue 2025-12-09 16:04:16 UTC; 40min ago
      Where: /var/lib/containers/storage/overlay/8596c538902bc4535dff82b7a8a22981e72d70123b2e43b209e873dedc5199d1/merged
       What: overlay

● var-lib-containers-storage-overlay-899f00770b450d8fd8d68af8f0fb6aa81333156e58e00294e9f2d7080ad8954c-merged.mount - /var/lib/containers/storage/overlay/899f00770b450d8fd8d68af8f0fb6aa81333156e58e00294e9f2d7080ad8954c/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 16:03:04 UTC; 41min ago
      Until: Tue 2025-12-09 16:03:04 UTC; 41min ago
      Where: /var/lib/containers/storage/overlay/899f00770b450d8fd8d68af8f0fb6aa81333156e58e00294e9f2d7080ad8954c/merged
       What: overlay

● var-lib-containers-storage-overlay-9c8a83efe91097139ff8420d1b99fa7334a2b40159947443bc0b52fb0b956608-merged.mount - /var/lib/containers/storage/overlay/9c8a83efe91097139ff8420d1b99fa7334a2b40159947443bc0b52fb0b956608/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 16:13:10 UTC; 31min ago
      Until: Tue 2025-12-09 16:13:10 UTC; 31min ago
      Where: /var/lib/containers/storage/overlay/9c8a83efe91097139ff8420d1b99fa7334a2b40159947443bc0b52fb0b956608/merged
       What: overlay

● var-lib-containers-storage-overlay-b0cc9fed9fbae5f5db81501d3394a8a9c2fa7822b9143e14331fd1f86760c7be-merged.mount - /var/lib/containers/storage/overlay/b0cc9fed9fbae5f5db81501d3394a8a9c2fa7822b9143e14331fd1f86760c7be/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 16:04:12 UTC; 40min ago
      Until: Tue 2025-12-09 16:04:12 UTC; 40min ago
      Where: /var/lib/containers/storage/overlay/b0cc9fed9fbae5f5db81501d3394a8a9c2fa7822b9143e14331fd1f86760c7be/merged
       What: overlay

● var-lib-containers-storage-overlay-b42024003d38bb02feb1addc572909e5dfad393f2a600bf3d26bf6bef64b57da-merged.mount - /var/lib/containers/storage/overlay/b42024003d38bb02feb1addc572909e5dfad393f2a600bf3d26bf6bef64b57da/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 16:05:08 UTC; 39min ago
      Until: Tue 2025-12-09 16:05:08 UTC; 39min ago
      Where: /var/lib/containers/storage/overlay/b42024003d38bb02feb1addc572909e5dfad393f2a600bf3d26bf6bef64b57da/merged
       What: overlay

● var-lib-containers-storage-overlay-e6642f7dd12681e1bf5c7fb0ded327a91c47ff4d7980f5ce1bae6614248b368c-merged.mount - /var/lib/containers/storage/overlay/e6642f7dd12681e1bf5c7fb0ded327a91c47ff4d7980f5ce1bae6614248b368c/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 16:04:21 UTC; 40min ago
      Until: Tue 2025-12-09 16:04:21 UTC; 40min ago
      Where: /var/lib/containers/storage/overlay/e6642f7dd12681e1bf5c7fb0ded327a91c47ff4d7980f5ce1bae6614248b368c/merged
       What: overlay

● var-lib-containers-storage-overlay-f06d8f96dc8caece02c3c26d657a19a110817749d0b00df484983688552a82b0-merged.mount - /var/lib/containers/storage/overlay/f06d8f96dc8caece02c3c26d657a19a110817749d0b00df484983688552a82b0/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 16:03:48 UTC; 41min ago
      Until: Tue 2025-12-09 16:03:48 UTC; 41min ago
      Where: /var/lib/containers/storage/overlay/f06d8f96dc8caece02c3c26d657a19a110817749d0b00df484983688552a82b0/merged
       What: overlay

● var-lib-containers-storage-overlay-f37c0497699fb8f415b55c05c3051410023af17b3b8cbf5af0fa2e2206b776b2-merged.mount - /var/lib/containers/storage/overlay/f37c0497699fb8f415b55c05c3051410023af17b3b8cbf5af0fa2e2206b776b2/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 16:03:06 UTC; 41min ago
      Until: Tue 2025-12-09 16:03:06 UTC; 41min ago
      Where: /var/lib/containers/storage/overlay/f37c0497699fb8f415b55c05c3051410023af17b3b8cbf5af0fa2e2206b776b2/merged
       What: overlay

● var-lib-containers-storage-overlay.mount - /var/lib/containers/storage/overlay
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 16:03:04 UTC; 41min ago
      Until: Tue 2025-12-09 16:03:04 UTC; 41min ago
      Where: /var/lib/containers/storage/overlay
       What: /dev/vda1

● var-lib-containers-storage-overlay\x2dcontainers-0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 16:13:10 UTC; 31min ago
      Until: Tue 2025-12-09 16:13:10 UTC; 31min ago
      Where: /var/lib/containers/storage/overlay-containers/0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 16:20:03 UTC; 24min ago
      Until: Tue 2025-12-09 16:20:03 UTC; 24min ago
      Where: /var/lib/containers/storage/overlay-containers/84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 16:21:49 UTC; 23min ago
      Until: Tue 2025-12-09 16:21:49 UTC; 23min ago
      Where: /var/lib/containers/storage/overlay-containers/9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-12-09 16:14:15 UTC; 30min ago
      Until: Tue 2025-12-09 16:14:15 UTC; 30min ago
      Where: /var/lib/containers/storage/overlay-containers/fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692/userdata/shm
       What: shm

○ var-lib-machines.mount - Virtual Machine and Container Storage (Compatibility)
     Loaded: loaded (/usr/lib/systemd/system/var-lib-machines.mount; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-09 16:18:06 UTC; 26min ago
      Where: /var/lib/machines
       What: /var/lib/machines.raw

Dec 09 16:18:06 compute-0 systemd[1]: Virtual Machine and Container Storage (Compatibility) was skipped because of an unmet condition check (ConditionPathExists=/var/lib/machines.raw).

● var-lib-nfs-rpc_pipefs.mount - RPC Pipe File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
      Where: /var/lib/nfs/rpc_pipefs
       What: rpc_pipefs

● systemd-ask-password-console.path - Dispatch Password Requests to Console Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.path; static)
     Active: active (waiting) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
   Triggers: ● systemd-ask-password-console.service
       Docs: man:systemd-ask-password-console.path(8)

● systemd-ask-password-wall.path - Forward Password Requests to Wall Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.path; static)
     Active: active (waiting) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
   Triggers: ● systemd-ask-password-wall.service
       Docs: man:systemd-ask-password-wall.path(8)

● init.scope - System and Service Manager
     Loaded: loaded
  Transient: yes
     Active: active (running) since Tue 2025-12-09 14:51:34 UTC; 1h 53min ago
       Docs: man:systemd(1)
         IO: 2.9M read, 0B written
      Tasks: 1 (limit: 48628)
     Memory: 48.5M (peak: 67.2M)
        CPU: 1min 11.849s
     CGroup: /init.scope
             └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31

Dec 09 16:43:54 compute-0 systemd[1]: var-lib-containers-storage-overlay-e311a7d7bf4a26b3f527aa768cefdd08366db4150db657fb872991a4ab6cd54d-merged.mount: Deactivated successfully.
Dec 09 16:43:54 compute-0 systemd[1]: libpod-conmon-f70b248c28c80787fcef6dd50e7c2036522011ca5122e5c21838cb4f3c187701.scope: Deactivated successfully.
Dec 09 16:44:09 compute-0 systemd[1]: Started Session 56 of User zuul.
Dec 09 16:44:38 compute-0 systemd[1]: Starting Hostname Service...
Dec 09 16:44:38 compute-0 systemd[1]: Started Hostname Service.
Dec 09 16:44:55 compute-0 systemd[1]: Started libpod-conmon-6de51581d8e77a1bc4dbf7916fdfe9005c1866ceba3ee9b9582f2907611c6fda.scope.
Dec 09 16:44:55 compute-0 systemd[1]: Started libcrun container.
Dec 09 16:44:55 compute-0 systemd[1]: libpod-6de51581d8e77a1bc4dbf7916fdfe9005c1866ceba3ee9b9582f2907611c6fda.scope: Deactivated successfully.
Dec 09 16:44:55 compute-0 systemd[1]: var-lib-containers-storage-overlay-9d2474e97d2f6ca8d7311d97cac788396ef7bcc51bb073801856fcc9ff22e173-merged.mount: Deactivated successfully.
Dec 09 16:44:55 compute-0 systemd[1]: libpod-conmon-6de51581d8e77a1bc4dbf7916fdfe9005c1866ceba3ee9b9582f2907611c6fda.scope: Deactivated successfully.

● libpod-0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470.scope.d
             └─dep.conf
     Active: active (running) since Tue 2025-12-09 16:13:10 UTC; 31min ago
         IO: 9.8M read, 4.0K written
      Tasks: 6 (limit: 4096)
     Memory: 16.7M (peak: 19.4M)
        CPU: 2.944s
     CGroup: /machine.slice/libpod-0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470.scope
             └─container
               ├─145423 dumb-init --single-child -- kolla_start
               └─145426 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt

Dec 09 16:13:10 compute-0 systemd[1]: Started libcrun container.

● libpod-84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-09 16:20:03 UTC; 24min ago
         IO: 96.0K read, 4.0K written
      Tasks: 8 (limit: 4096)
     Memory: 19.1M (peak: 21.1M)
        CPU: 1.044s
     CGroup: /machine.slice/libpod-84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2.scope
             └─container
               ├─225740 dumb-init --single-child -- kolla_start
               └─225743 /usr/sbin/multipathd -d

Dec 09 16:20:03 compute-0 systemd[1]: Started libcrun container.
Dec 09 16:20:03 compute-0 sudo[225744]:     root : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_set_configs
Dec 09 16:20:03 compute-0 sudo[225744]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Dec 09 16:20:03 compute-0 sudo[225744]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=0)
Dec 09 16:20:03 compute-0 sudo[225744]: pam_unix(sudo:session): session closed for user root
Dec 09 16:20:03 compute-0 sudo[225776]:     root : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_copy_cacerts
Dec 09 16:20:03 compute-0 sudo[225776]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Dec 09 16:20:03 compute-0 sudo[225776]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=0)
Dec 09 16:20:03 compute-0 sudo[225776]: pam_unix(sudo:session): session closed for user root

● libpod-9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-09 16:21:49 UTC; 23min ago
         IO: 0B read, 36.0K written
      Tasks: 23 (limit: 4096)
     Memory: 137.2M (peak: 170.2M)
        CPU: 34.303s
     CGroup: /machine.slice/libpod-9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce.scope
             └─container
               ├─243454 dumb-init --single-child -- kolla_start
               └─243461 /usr/bin/python3 /usr/bin/nova-compute

Dec 09 16:21:49 compute-0 systemd[1]: Started libcrun container.

● libpod-fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692.scope.d
             └─dep.conf
     Active: active (running) since Tue 2025-12-09 16:14:15 UTC; 30min ago
         IO: 11.8M read, 220.0K written
      Tasks: 5 (limit: 4096)
     Memory: 234.4M (peak: 236.1M)
        CPU: 5.952s
     CGroup: /machine.slice/libpod-fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692.scope
             └─container
               ├─155088 dumb-init --single-child -- kolla_start
               ├─155091 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─155210 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               └─155215 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp2hzlk811/privsep.sock

Dec 09 16:14:15 compute-0 systemd[1]: Started libcrun container.

● session-1.scope - Session 1 of User zuul
     Loaded: loaded (/run/systemd/transient/session-1.scope; transient)
  Transient: yes
     Active: active (abandoned) since Tue 2025-12-09 15:27:59 UTC; 1h 16min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 17.2M (peak: 39.8M)
        CPU: 1min 27.630s
     CGroup: /user.slice/user-1000.slice/session-1.scope
             └─4551 /usr/bin/python3

Dec 09 15:29:45 np0005552052.novalocal python3[7169]: ansible-ansible.legacy.copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1765294184.4033675-102-224505552608258/source dest=/etc/NetworkManager/system-connections/ci-private-network.nmconnection mode=0600 owner=root group=root follow=False _original_basename=bootstrap-ci-network-nm-connection.nmconnection.j2 checksum=0d36724261da39a94482e744a08ff1edc04b67ea backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Dec 09 15:29:45 np0005552052.novalocal sudo[7167]: pam_unix(sudo:session): session closed for user root
Dec 09 15:29:45 np0005552052.novalocal sudo[7217]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-grmxzwpsgpxqzcjxjzyfigkxmpagzajz ; OS_CLOUD=vexxhost /usr/bin/python3'
Dec 09 15:29:45 np0005552052.novalocal sudo[7217]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Dec 09 15:29:45 np0005552052.novalocal python3[7219]: ansible-ansible.builtin.systemd Invoked with name=NetworkManager state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Dec 09 15:29:46 np0005552052.novalocal sudo[7217]: pam_unix(sudo:session): session closed for user root
Dec 09 15:29:47 np0005552052.novalocal python3[7286]: ansible-ansible.legacy.command Invoked with _raw_params=ip route zuul_log_id=fa163ef9-e89a-b07d-56ca-0000000000a7-0-controller zuul_ansible_split_streams=False _uses_shell=False warn=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Dec 09 15:30:47 np0005552052.novalocal sshd-session[4350]: Received disconnect from 38.102.83.114 port 48740:11: disconnected by user
Dec 09 15:30:47 np0005552052.novalocal sshd-session[4350]: Disconnected from user zuul 38.102.83.114 port 48740
Dec 09 15:30:47 np0005552052.novalocal sshd-session[4335]: pam_unix(sshd:session): session closed for user zuul

● session-21.scope - Session 21 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-21.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-09 16:03:32 UTC; 41min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 1.3M)
        CPU: 68ms
     CGroup: /user.slice/user-42477.slice/session-21.scope
             ├─76647 "sshd-session: ceph-admin [priv]"
             └─76670 "sshd-session: ceph-admin"

Dec 09 16:03:32 compute-0 systemd[1]: Started Session 21 of User ceph-admin.

● session-23.scope - Session 23 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-23.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-09 16:03:32 UTC; 41min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.1M)
        CPU: 151ms
     CGroup: /user.slice/user-42477.slice/session-23.scope
             ├─76665 "sshd-session: ceph-admin [priv]"
             └─76671 "sshd-session: ceph-admin@notty"

Dec 09 16:03:32 compute-0 systemd[1]: Started Session 23 of User ceph-admin.
Dec 09 16:03:32 compute-0 sudo[76672]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Dec 09 16:03:32 compute-0 sudo[76672]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 09 16:03:32 compute-0 sudo[76672]: pam_unix(sudo:session): session closed for user root

● session-24.scope - Session 24 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-24.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-09 16:03:32 UTC; 41min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 5.9M)
        CPU: 169ms
     CGroup: /user.slice/user-42477.slice/session-24.scope
             ├─76697 "sshd-session: ceph-admin [priv]"
             └─76700 "sshd-session: ceph-admin@notty"

Dec 09 16:03:32 compute-0 systemd[1]: Started Session 24 of User ceph-admin.
Dec 09 16:03:32 compute-0 sudo[76701]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b --timeout 895 check-host --expect-hostname compute-0
Dec 09 16:03:32 compute-0 sudo[76701]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 09 16:03:32 compute-0 sudo[76701]: pam_unix(sudo:session): session closed for user root

● session-25.scope - Session 25 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-25.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-09 16:03:33 UTC; 41min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.1M)
        CPU: 129ms
     CGroup: /user.slice/user-42477.slice/session-25.scope
             ├─76726 "sshd-session: ceph-admin [priv]"
             └─76729 "sshd-session: ceph-admin@notty"

Dec 09 16:03:33 compute-0 systemd[1]: Started Session 25 of User ceph-admin.
Dec 09 16:03:33 compute-0 sudo[76730]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /var/lib/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b
Dec 09 16:03:33 compute-0 sudo[76730]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 09 16:03:33 compute-0 sudo[76730]: pam_unix(sudo:session): session closed for user root

● session-26.scope - Session 26 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-26.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-09 16:03:33 UTC; 41min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.0M)
        CPU: 151ms
     CGroup: /user.slice/user-42477.slice/session-26.scope
             ├─76755 "sshd-session: ceph-admin [priv]"
             └─76758 "sshd-session: ceph-admin@notty"

Dec 09 16:03:33 compute-0 systemd[1]: Started Session 26 of User ceph-admin.
Dec 09 16:03:33 compute-0 sudo[76759]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /var/lib/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf
Dec 09 16:03:33 compute-0 sudo[76759]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 09 16:03:33 compute-0 sudo[76759]: pam_unix(sudo:session): session closed for user root

● session-27.scope - Session 27 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-27.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-09 16:03:33 UTC; 41min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 3.9M)
        CPU: 145ms
     CGroup: /user.slice/user-42477.slice/session-27.scope
             ├─76784 "sshd-session: ceph-admin [priv]"
             └─76787 "sshd-session: ceph-admin@notty"

Dec 09 16:03:33 compute-0 systemd[1]: Started Session 27 of User ceph-admin.
Dec 09 16:03:34 compute-0 sudo[76788]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /tmp/cephadm-67f67f44-54fc-54ea-8df0-10931b6ecdaf/var/lib/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf
Dec 09 16:03:34 compute-0 sudo[76788]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 09 16:03:34 compute-0 sudo[76788]: pam_unix(sudo:session): session closed for user root

● session-28.scope - Session 28 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-28.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-09 16:03:34 UTC; 41min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.3M)
        CPU: 153ms
     CGroup: /user.slice/user-42477.slice/session-28.scope
             ├─76813 "sshd-session: ceph-admin [priv]"
             └─76816 "sshd-session: ceph-admin@notty"

Dec 09 16:03:34 compute-0 systemd[1]: Started Session 28 of User ceph-admin.
Dec 09 16:03:34 compute-0 sudo[76817]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/touch /tmp/cephadm-67f67f44-54fc-54ea-8df0-10931b6ecdaf/var/lib/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b.new
Dec 09 16:03:34 compute-0 sudo[76817]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 09 16:03:34 compute-0 sudo[76817]: pam_unix(sudo:session): session closed for user root

● session-29.scope - Session 29 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-29.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-09 16:03:34 UTC; 41min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 3.8M)
        CPU: 172ms
     CGroup: /user.slice/user-42477.slice/session-29.scope
             ├─76842 "sshd-session: ceph-admin [priv]"
             └─76845 "sshd-session: ceph-admin@notty"

Dec 09 16:03:34 compute-0 systemd[1]: Started Session 29 of User ceph-admin.
Dec 09 16:03:34 compute-0 sudo[76846]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chown -R ceph-admin /tmp/cephadm-67f67f44-54fc-54ea-8df0-10931b6ecdaf
Dec 09 16:03:34 compute-0 sudo[76846]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 09 16:03:34 compute-0 sudo[76846]: pam_unix(sudo:session): session closed for user root

● session-30.scope - Session 30 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-30.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-09 16:03:35 UTC; 41min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.3M)
        CPU: 152ms
     CGroup: /user.slice/user-42477.slice/session-30.scope
             ├─76871 "sshd-session: ceph-admin [priv]"
             └─76874 "sshd-session: ceph-admin@notty"

Dec 09 16:03:35 compute-0 systemd[1]: Started Session 30 of User ceph-admin.
Dec 09 16:03:35 compute-0 sudo[76875]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chmod 644 /tmp/cephadm-67f67f44-54fc-54ea-8df0-10931b6ecdaf/var/lib/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b.new
Dec 09 16:03:35 compute-0 sudo[76875]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 09 16:03:35 compute-0 sudo[76875]: pam_unix(sudo:session): session closed for user root

● session-31.scope - Session 31 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-31.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-09 16:03:35 UTC; 41min ago
         IO: 0B read, 1016.0K written
      Tasks: 2
     Memory: 2.2M (peak: 3.6M)
        CPU: 180ms
     CGroup: /user.slice/user-42477.slice/session-31.scope
             ├─76900 "sshd-session: ceph-admin [priv]"
             └─76903 "sshd-session: ceph-admin@notty"

Dec 09 16:03:35 compute-0 systemd[1]: Started Session 31 of User ceph-admin.

● session-32.scope - Session 32 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-32.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-09 16:03:37 UTC; 41min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 5.6M)
        CPU: 150ms
     CGroup: /user.slice/user-42477.slice/session-32.scope
             ├─76927 "sshd-session: ceph-admin [priv]"
             └─76930 "sshd-session: ceph-admin@notty"

Dec 09 16:03:37 compute-0 systemd[1]: Started Session 32 of User ceph-admin.
Dec 09 16:03:37 compute-0 sudo[76931]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mv -Z /tmp/cephadm-67f67f44-54fc-54ea-8df0-10931b6ecdaf/var/lib/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b.new /var/lib/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b
Dec 09 16:03:37 compute-0 sudo[76931]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Dec 09 16:03:37 compute-0 sudo[76931]: pam_unix(sudo:session): session closed for user root

● session-33.scope - Session 33 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-33.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-09 16:03:37 UTC; 41min ago
         IO: 2.7M read, 135.6M written
      Tasks: 18
     Memory: 46.8M (peak: 57.6M)
        CPU: 2min 54.907s
     CGroup: /user.slice/user-42477.slice/session-33.scope
             ├─ 76956 "sshd-session: ceph-admin [priv]"
             ├─ 76959 "sshd-session: ceph-admin@notty"
             ├─269057 sudo /bin/python3 /var/lib/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86 --timeout 895 ceph-volume --fsid 67f67f44-54fc-54ea-8df0-10931b6ecdaf --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --objectstore bluestore --yes --no-systemd
             ├─269082 /bin/python3 /var/lib/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86 --timeout 895 ceph-volume --fsid 67f67f44-54fc-54ea-8df0-10931b6ecdaf --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --objectstore bluestore --yes --no-systemd
             └─269391 /bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86 -e NODE_NAME=compute-0 -e CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf:/var/run/ceph:z -v /var/log/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf:/var/log/ceph:z -v /var/lib/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpfuu3d7xo:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpoufvpa0c:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86 lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --objectstore bluestore --yes --no-systemd

Dec 09 16:44:55 compute-0 podman[269121]: 2025-12-09 16:44:55.382306482 +0000 UTC m=+0.022073257 image pull 524f3da276461682bec27427fb8a63b5139c40ad4185939aede197474a6817b3 quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86
Dec 09 16:44:55 compute-0 podman[269121]: 2025-12-09 16:44:55.483354857 +0000 UTC m=+0.123121622 container init 6de51581d8e77a1bc4dbf7916fdfe9005c1866ceba3ee9b9582f2907611c6fda (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=inspiring_pare, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20251030, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, ceph=True, CEPH_REF=tentacle)
Dec 09 16:44:55 compute-0 podman[269121]: 2025-12-09 16:44:55.491330793 +0000 UTC m=+0.131097538 container start 6de51581d8e77a1bc4dbf7916fdfe9005c1866ceba3ee9b9582f2907611c6fda (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=inspiring_pare, io.buildah.version=1.41.3, CEPH_REF=tentacle, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20251030, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True)
Dec 09 16:44:55 compute-0 podman[269121]: 2025-12-09 16:44:55.494698428 +0000 UTC m=+0.134465193 container attach 6de51581d8e77a1bc4dbf7916fdfe9005c1866ceba3ee9b9582f2907611c6fda (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=inspiring_pare, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, org.label-schema.license=GPLv2, org.label-schema.build-date=20251030, CEPH_REF=tentacle, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS)
Dec 09 16:44:55 compute-0 podman[269121]: 2025-12-09 16:44:55.496633073 +0000 UTC m=+0.136399818 container died 6de51581d8e77a1bc4dbf7916fdfe9005c1866ceba3ee9b9582f2907611c6fda (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=inspiring_pare, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20251030, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, CEPH_REF=tentacle, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Dec 09 16:44:55 compute-0 podman[269121]: 2025-12-09 16:44:55.535835024 +0000 UTC m=+0.175601769 container remove 6de51581d8e77a1bc4dbf7916fdfe9005c1866ceba3ee9b9582f2907611c6fda (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=inspiring_pare, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20251030, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, OSD_FLAVOR=default, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_REF=tentacle, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True)
Dec 09 16:44:55 compute-0 podman[269391]: 2025-12-09 16:44:55.694358369 +0000 UTC m=+0.040353085 container create 4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=funny_jepsen, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.41.3, CEPH_REF=tentacle, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20251030)
Dec 09 16:44:55 compute-0 podman[269391]: 2025-12-09 16:44:55.677284905 +0000 UTC m=+0.023279641 image pull 524f3da276461682bec27427fb8a63b5139c40ad4185939aede197474a6817b3 quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86
Dec 09 16:44:55 compute-0 podman[269391]: 2025-12-09 16:44:55.782562159 +0000 UTC m=+0.128556925 container init 4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=funny_jepsen, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=tentacle, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.build-date=20251030, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/)
Dec 09 16:44:55 compute-0 podman[269391]: 2025-12-09 16:44:55.792402088 +0000 UTC m=+0.138396824 container start 4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=funny_jepsen, ceph=True, org.label-schema.build-date=20251030, io.buildah.version=1.41.3, CEPH_REF=tentacle, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)

● session-56.scope - Session 56 of User zuul
     Loaded: loaded (/run/systemd/transient/session-56.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-12-09 16:44:09 UTC; 46s ago
         IO: 279.6M read, 232.7M written
      Tasks: 29
     Memory: 706.7M (peak: 753.6M)
        CPU: 2min 11.615s
     CGroup: /user.slice/user-1000.slice/session-56.scope
             ├─262891 "sshd-session: zuul [priv]"
             ├─262913 "sshd-session: zuul@notty"
             ├─262914 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             ├─262938 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             ├─268564 timeout 15s turbostat --debug sleep 10
             ├─269108 timeout 300s systemctl status --all
             ├─269111 systemctl status --all
             ├─269220 timeout 300s ceph osd df --format json-pretty
             └─269228 /usr/bin/python3 -s /usr/bin/ceph osd df --format json-pretty

Dec 09 16:44:09 compute-0 systemd[1]: Started Session 56 of User zuul.
Dec 09 16:44:09 compute-0 sudo[262914]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Dec 09 16:44:09 compute-0 sudo[262914]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Dec 09 16:44:18 compute-0 ovs-vsctl[263261]: ovs|00001|db_ctl_base|ERR|no key "dpdk-init" in Open_vSwitch record "." column other_config
Dec 09 16:44:29 compute-0 crontab[264994]: (root) LIST (root)
Dec 09 16:44:46 compute-0 ovs-appctl[267640]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Dec 09 16:44:46 compute-0 ovs-appctl[267650]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Dec 09 16:44:46 compute-0 ovs-appctl[267654]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

○ 0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470-46e7fcdfdf3aba34.service - /usr/bin/podman healthcheck run 0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470
     Loaded: loaded (/run/systemd/transient/0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470-46e7fcdfdf3aba34.service; transient)
  Transient: yes
     Active: inactive (dead) since Tue 2025-12-09 16:44:27 UTC; 28s ago
   Duration: 135ms
TriggeredBy: ● 0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470-46e7fcdfdf3aba34.timer
    Process: 264740 ExecStart=/usr/bin/podman healthcheck run 0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470 (code=exited, status=0/SUCCESS)
   Main PID: 264740 (code=exited, status=0/SUCCESS)
        CPU: 91ms

Dec 09 16:44:27 compute-0 podman[264740]: 2025-12-09 16:44:27.665854033 +0000 UTC m=+0.110580826 container health_status 0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller@sha256:fa24ce4aa285e3632c86a53e8d0385d4c788d049da42dd06570ad9d44aae00de, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller@sha256:fa24ce4aa285e3632c86a53e8d0385d4c788d049da42dd06570ad9d44aae00de', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, container_name=ovn_controller, org.label-schema.build-date=20251202, tcib_managed=true, config_id=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)
Unit apparmor.service could not be found.
Unit apt-daily.service could not be found.

○ 84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2-c203cf0939cbbd6.service - /usr/bin/podman healthcheck run 84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2
     Loaded: loaded (/run/systemd/transient/84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2-c203cf0939cbbd6.service; transient)
  Transient: yes
     Active: inactive (dead) since Tue 2025-12-09 16:44:41 UTC; 14s ago
   Duration: 103ms
TriggeredBy: ● 84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2-c203cf0939cbbd6.timer
    Process: 266462 ExecStart=/usr/bin/podman healthcheck run 84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2 (code=exited, status=0/SUCCESS)
   Main PID: 266462 (code=exited, status=0/SUCCESS)
        CPU: 90ms

Dec 09 16:44:41 compute-0 podman[266462]: 2025-12-09 16:44:41.76384783 +0000 UTC m=+0.081322047 container health_status 84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2 (image=quay.io/podified-antelope-centos9/openstack-multipathd@sha256:df38dbd6b3eccec2abaa8e3618a385405ccec1b73ae8c3573a138b0c961ed31f, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd@sha256:df38dbd6b3eccec2abaa8e3618a385405ccec1b73ae8c3573a138b0c961ed31f', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, managed_by=edpm_ansible, tcib_managed=true, org.label-schema.build-date=20251202, config_id=multipathd, container_name=multipathd, org.label-schema.name=CentOS Stream 9 Base Image)

● auditd.service - Security Auditing Service
     Loaded: loaded (/usr/lib/systemd/system/auditd.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:auditd(8)
             https://github.com/linux-audit/audit-documentation
   Main PID: 700 (auditd)
         IO: 4.0K read, 23.4M written
      Tasks: 4 (limit: 48628)
     Memory: 14.0M (peak: 14.5M)
        CPU: 5.598s
     CGroup: /system.slice/auditd.service
Unit auto-cpufreq.service could not be found.
Unit autofs.service could not be found.
             ├─700 /sbin/auditd
             └─702 /usr/sbin/sedispatch

Dec 09 14:51:37 localhost augenrules[720]: failure 1
Dec 09 14:51:37 localhost augenrules[720]: pid 700
Dec 09 14:51:37 localhost augenrules[720]: rate_limit 0
Dec 09 14:51:37 localhost augenrules[720]: backlog_limit 8192
Dec 09 14:51:37 localhost augenrules[720]: lost 0
Dec 09 14:51:37 localhost augenrules[720]: backlog 0
Dec 09 14:51:37 localhost augenrules[720]: backlog_wait_time 60000
Dec 09 14:51:37 localhost augenrules[720]: backlog_wait_time_actual 0
Dec 09 14:51:37 localhost systemd[1]: Started Security Auditing Service.
Dec 09 16:17:39 compute-0 auditd[700]: Audit daemon rotating log files

○ auth-rpcgss-module.service - Kernel Module supporting RPCSEC_GSS
     Loaded: loaded (/usr/lib/systemd/system/auth-rpcgss-module.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-09 14:51:36 UTC; 1h 53min ago

● ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@crash.compute-0.service - Ceph crash.compute-0 for 67f67f44-54fc-54ea-8df0-10931b6ecdaf
     Loaded: loaded (/etc/systemd/system/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-09 16:03:48 UTC; 41min ago
   Main PID: 80272 (conmon)
         IO: 0B read, 177.0K written
      Tasks: 3 (limit: 48628)
     Memory: 7.7M (peak: 24.1M)
        CPU: 601ms
     CGroup: /system.slice/system-ceph\x2d67f67f44\x2d54fc\x2d54ea\x2d8df0\x2d10931b6ecdaf.slice/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@crash.compute-0.service
             ├─libpod-payload-1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973
             │ ├─80274 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ └─80276 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             └─runtime
               └─80272 /usr/bin/conmon --api-version 1 -c 1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973 -u 1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973/userdata -p /run/containers/storage/overlay-containers/1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973

Dec 09 16:03:48 compute-0 systemd[1]: Started Ceph crash.compute-0 for 67f67f44-54fc-54ea-8df0-10931b6ecdaf.
Dec 09 16:03:48 compute-0 ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-crash-compute-0[80272]: INFO:ceph-crash:pinging cluster to exercise our key
Dec 09 16:03:48 compute-0 ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-crash-compute-0[80272]: 2025-12-09T16:03:48.378+0000 7f8428fd7640 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory
Dec 09 16:03:48 compute-0 ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-crash-compute-0[80272]: 2025-12-09T16:03:48.378+0000 7f8428fd7640 -1 AuthRegistry(0x7f8424052d90) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx
Dec 09 16:03:48 compute-0 ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-crash-compute-0[80272]: 2025-12-09T16:03:48.380+0000 7f8428fd7640 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory
Dec 09 16:03:48 compute-0 ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-crash-compute-0[80272]: 2025-12-09T16:03:48.380+0000 7f8428fd7640 -1 AuthRegistry(0x7f8428fd5fe0) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx
Dec 09 16:03:48 compute-0 ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-crash-compute-0[80272]: 2025-12-09T16:03:48.381+0000 7f8422575640 -1 monclient(hunting): handle_auth_bad_method server allowed_methods [2] but i only support [1]
Dec 09 16:03:48 compute-0 ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-crash-compute-0[80272]: 2025-12-09T16:03:48.381+0000 7f8428fd7640 -1 monclient: authenticate NOTE: no keyring found; disabled cephx authentication
Dec 09 16:03:48 compute-0 ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-crash-compute-0[80272]: [errno 13] RADOS permission denied (error connecting to the cluster)
Dec 09 16:03:48 compute-0 ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-crash-compute-0[80272]: INFO:ceph-crash:monitoring path /var/lib/ceph/crash, delay 600s

● ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mds.cephfs.compute-0.izecis.service - Ceph mds.cephfs.compute-0.izecis for 67f67f44-54fc-54ea-8df0-10931b6ecdaf
     Loaded: loaded (/etc/systemd/system/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-09 16:05:08 UTC; 39min ago
   Main PID: 95392 (conmon)
         IO: 0B read, 3.5M written
      Tasks: 31 (limit: 48628)
     Memory: 26.2M (peak: 27.2M)
        CPU: 4.397s
     CGroup: /system.slice/system-ceph\x2d67f67f44\x2d54fc\x2d54ea\x2d8df0\x2d10931b6ecdaf.slice/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mds.cephfs.compute-0.izecis.service
             ├─libpod-payload-63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474
             │ ├─95394 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.izecis -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─95396 /usr/bin/ceph-mds -n mds.cephfs.compute-0.izecis -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─95392 /usr/bin/conmon --api-version 1 -c 63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474 -u 63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474/userdata -p /run/containers/storage/overlay-containers/63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-mds-cephfs-compute-0-izecis --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mds.cephfs.compute-0.izecis.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474

Dec 09 16:44:20 compute-0 ceph-mds[95396]: mds.cephfs.compute-0.izecis asok_command: dump loads {prefix=dump loads} (starting...)
Dec 09 16:44:20 compute-0 ceph-mds[95396]: mds.cephfs.compute-0.izecis asok_command: dump tree {prefix=dump tree,root=/} (starting...)
Dec 09 16:44:20 compute-0 ceph-mds[95396]: mds.cephfs.compute-0.izecis asok_command: dump_blocked_ops {prefix=dump_blocked_ops} (starting...)
Dec 09 16:44:20 compute-0 ceph-mds[95396]: mds.cephfs.compute-0.izecis asok_command: dump_historic_ops {prefix=dump_historic_ops} (starting...)
Dec 09 16:44:20 compute-0 ceph-mds[95396]: mds.cephfs.compute-0.izecis asok_command: dump_historic_ops_by_duration {prefix=dump_historic_ops_by_duration} (starting...)
Dec 09 16:44:21 compute-0 ceph-mds[95396]: mds.cephfs.compute-0.izecis asok_command: dump_ops_in_flight {prefix=dump_ops_in_flight} (starting...)
Dec 09 16:44:21 compute-0 ceph-mds[95396]: mds.cephfs.compute-0.izecis asok_command: get subtrees {prefix=get subtrees} (starting...)
Dec 09 16:44:21 compute-0 ceph-mds[95396]: mds.cephfs.compute-0.izecis asok_command: ops {prefix=ops} (starting...)
Dec 09 16:44:22 compute-0 ceph-mds[95396]: mds.cephfs.compute-0.izecis asok_command: session ls {prefix=session ls} (starting...)
Dec 09 16:44:22 compute-0 ceph-mds[95396]: mds.cephfs.compute-0.izecis asok_command: status {prefix=status} (starting...)

● ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mgr.compute-0.ysegzv.service - Ceph mgr.compute-0.ysegzv for 67f67f44-54fc-54ea-8df0-10931b6ecdaf
     Loaded: loaded (/etc/systemd/system/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-09 16:03:06 UTC; 41min ago
   Main PID: 75511 (conmon)
         IO: 0B read, 2.2M written
      Tasks: 144 (limit: 48628)
     Memory: 522.6M (peak: 523.3M)
        CPU: 49.195s
     CGroup: /system.slice/system-ceph\x2d67f67f44\x2d54fc\x2d54ea\x2d8df0\x2d10931b6ecdaf.slice/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mgr.compute-0.ysegzv.service
             ├─libpod-payload-f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662
             │ ├─75513 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.ysegzv -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─75515 /usr/bin/ceph-mgr -n mgr.compute-0.ysegzv -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─75511 /usr/bin/conmon --api-version 1 -c f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662 -u f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662/userdata -p /run/containers/storage/overlay-containers/f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-mgr-compute-0-ysegzv --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mgr.compute-0.ysegzv.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662

Dec 09 16:44:48 compute-0 ceph-mgr[75515]: log_channel(audit) log [DBG] : from='client.14796 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""]}]: dispatch
Dec 09 16:44:48 compute-0 ceph-mgr[75515]: log_channel(audit) log [DBG] : from='client.14798 -' entity='client.admin' cmd=[{"prefix": "pg stat", "target": ["mon-mgr", ""]}]: dispatch
Dec 09 16:44:48 compute-0 ceph-mgr[75515]: log_channel(cluster) log [DBG] : pgmap v1384: 305 pgs: 305 active+clean; 461 KiB data, 137 MiB used, 60 GiB / 60 GiB avail
Dec 09 16:44:50 compute-0 ceph-mgr[75515]: log_channel(audit) log [DBG] : from='client.14806 -' entity='client.admin' cmd=[{"prefix": "device ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Dec 09 16:44:50 compute-0 ceph-mgr[75515]: log_channel(cluster) log [DBG] : pgmap v1385: 305 pgs: 305 active+clean; 461 KiB data, 137 MiB used, 60 GiB / 60 GiB avail
Dec 09 16:44:52 compute-0 ceph-mgr[75515]: log_channel(cluster) log [DBG] : pgmap v1386: 305 pgs: 305 active+clean; 461 KiB data, 137 MiB used, 60 GiB / 60 GiB avail
Dec 09 16:44:53 compute-0 ceph-mgr[75515]: log_channel(audit) log [DBG] : from='client.14816 -' entity='client.admin' cmd=[{"prefix": "fs status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Dec 09 16:44:54 compute-0 ceph-mgr[75515]: log_channel(audit) log [DBG] : from='client.14822 -' entity='client.admin' cmd=[{"prefix": "osd blocked-by", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Dec 09 16:44:54 compute-0 ceph-mgr[75515]: log_channel(cluster) log [DBG] : pgmap v1387: 305 pgs: 305 active+clean; 461 KiB data, 137 MiB used, 60 GiB / 60 GiB avail
Dec 09 16:44:55 compute-0 ceph-mgr[75515]: log_channel(audit) log [DBG] : from='client.14826 -' entity='client.admin' cmd=[{"prefix": "osd df", "output_method": "tree", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch

● ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mon.compute-0.service - Ceph mon.compute-0 for 67f67f44-54fc-54ea-8df0-10931b6ecdaf
     Loaded: loaded (/etc/systemd/system/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-09 16:03:04 UTC; 41min ago
   Main PID: 75218 (conmon)
         IO: 1.6M read, 255.0M written
      Tasks: 27 (limit: 48628)
     Memory: 74.7M (peak: 86.0M)
        CPU: 24.524s
     CGroup: /system.slice/system-ceph\x2d67f67f44\x2d54fc\x2d54ea\x2d8df0\x2d10931b6ecdaf.slice/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mon.compute-0.service
             ├─libpod-payload-9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6
             │ ├─75220 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─75222 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             └─runtime
               └─75218 /usr/bin/conmon --api-version 1 -c 9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6 -u 9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6/userdata -p /run/containers/storage/overlay-containers/9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6

Dec 09 16:44:55 compute-0 ceph-mon[75222]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd blocklist ls", "format": "json-pretty"} v 0)
Dec 09 16:44:55 compute-0 ceph-mon[75222]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/4175834408' entity='client.admin' cmd={"prefix": "osd blocklist ls", "format": "json-pretty"} : dispatch
Dec 09 16:44:55 compute-0 ceph-mon[75222]: from='client.? 192.168.122.100:0/1211349026' entity='client.admin' cmd={"prefix": "mon dump", "format": "json-pretty"} : dispatch
Dec 09 16:44:55 compute-0 ceph-mon[75222]: from='mgr.14124 192.168.122.100:0/2022010261' entity='mgr.compute-0.ysegzv' cmd={"prefix": "config generate-minimal-conf"} : dispatch
Dec 09 16:44:55 compute-0 ceph-mon[75222]: from='mgr.14124 192.168.122.100:0/2022010261' entity='mgr.compute-0.ysegzv' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
Dec 09 16:44:55 compute-0 ceph-mon[75222]: from='mgr.14124 192.168.122.100:0/2022010261' entity='mgr.compute-0.ysegzv' 
Dec 09 16:44:55 compute-0 ceph-mon[75222]: from='mgr.14124 192.168.122.100:0/2022010261' entity='mgr.compute-0.ysegzv' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
Dec 09 16:44:55 compute-0 ceph-mon[75222]: from='mgr.14124 192.168.122.100:0/2022010261' entity='mgr.compute-0.ysegzv' cmd={"prefix": "auth get", "entity": "client.bootstrap-osd"} : dispatch
Dec 09 16:44:55 compute-0 ceph-mon[75222]: from='mgr.14124 192.168.122.100:0/2022010261' entity='mgr.compute-0.ysegzv' cmd={"prefix": "config generate-minimal-conf"} : dispatch
Dec 09 16:44:55 compute-0 ceph-mon[75222]: from='client.? 192.168.122.100:0/4175834408' entity='client.admin' cmd={"prefix": "osd blocklist ls", "format": "json-pretty"} : dispatch

● ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.0.service - Ceph osd.0 for 67f67f44-54fc-54ea-8df0-10931b6ecdaf
     Loaded: loaded (/etc/systemd/system/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-09 16:04:12 UTC; 40min ago
   Main PID: 86009 (conmon)
         IO: 6.7M read, 1.2G written
      Tasks: 61 (limit: 48628)
     Memory: 454.9M (peak: 514.6M)
        CPU: 18.143s
     CGroup: /system.slice/system-ceph\x2d67f67f44\x2d54fc\x2d54ea\x2d8df0\x2d10931b6ecdaf.slice/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.0.service
             ├─libpod-payload-012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1
             │ ├─86011 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ └─86013 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             └─runtime
               └─86009 /usr/bin/conmon --api-version 1 -c 012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1 -u 012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1/userdata -p /run/containers/storage/overlay-containers/012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1

Dec 09 16:44:35 compute-0 ceph-osd[86013]: osd.0 152 heartbeat osd_stat(store_statfs(0x4fce5b000/0x0/0x4ffc00000, data 0xe7f1c/0x1cf000, compress 0x0/0x0/0x0, omap 0x1bced, meta 0x2bb4313), peers [1,2] op hist [])
Dec 09 16:44:35 compute-0 ceph-osd[86013]: monclient: tick
Dec 09 16:44:35 compute-0 ceph-osd[86013]: monclient: _check_auth_tickets
Dec 09 16:44:35 compute-0 ceph-osd[86013]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-12-09T16:44:03.860058+0000)
Dec 09 16:44:35 compute-0 ceph-osd[86013]: prioritycache tune_memory target: 4294967296 mapped: 81600512 unmapped: 9879552 heap: 91480064 old mem: 2845415832 new mem: 2845415832
Dec 09 16:44:35 compute-0 ceph-osd[86013]: monclient: tick
Dec 09 16:44:35 compute-0 ceph-osd[86013]: monclient: _check_auth_tickets
Dec 09 16:44:35 compute-0 ceph-osd[86013]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-12-09T16:44:04.860222+0000)
Dec 09 16:44:35 compute-0 ceph-osd[86013]: prioritycache tune_memory target: 4294967296 mapped: 81903616 unmapped: 9576448 heap: 91480064 old mem: 2845415832 new mem: 2845415832
Dec 09 16:44:35 compute-0 ceph-osd[86013]: do_command 'log dump' '{prefix=log dump}'

● ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.1.service - Ceph osd.1 for 67f67f44-54fc-54ea-8df0-10931b6ecdaf
     Loaded: loaded (/etc/systemd/system/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-09 16:04:16 UTC; 40min ago
   Main PID: 87051 (conmon)
         IO: 6.8M read, 1.4G written
      Tasks: 61 (limit: 48628)
     Memory: 488.0M (peak: 521.8M)
        CPU: 19.010s
     CGroup: /system.slice/system-ceph\x2d67f67f44\x2d54fc\x2d54ea\x2d8df0\x2d10931b6ecdaf.slice/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.1.service
             ├─libpod-payload-187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a
             │ ├─87053 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ └─87055 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             └─runtime
               └─87051 /usr/bin/conmon --api-version 1 -c 187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a -u 187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a/userdata -p /run/containers/storage/overlay-containers/187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a

Dec 09 16:44:31 compute-0 ceph-osd[87055]: do_command 'counter schema' '{prefix=counter schema}' result is 0 bytes
Dec 09 16:44:31 compute-0 ceph-osd[87055]: prioritycache tune_memory target: 4294967296 mapped: 89161728 unmapped: 30392320 heap: 119554048 old mem: 2845415832 new mem: 2845415832
Dec 09 16:44:31 compute-0 ceph-osd[87055]: monclient: tick
Dec 09 16:44:31 compute-0 ceph-osd[87055]: monclient: _check_auth_tickets
Dec 09 16:44:31 compute-0 ceph-osd[87055]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-12-09T16:43:59.405236+0000)
Dec 09 16:44:31 compute-0 ceph-osd[87055]: prioritycache tune_memory target: 4294967296 mapped: 89456640 unmapped: 30097408 heap: 119554048 old mem: 2845415832 new mem: 2845415832
Dec 09 16:44:31 compute-0 ceph-osd[87055]: monclient: tick
Dec 09 16:44:31 compute-0 ceph-osd[87055]: monclient: _check_auth_tickets
Dec 09 16:44:31 compute-0 ceph-osd[87055]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-12-09T16:44:00.405350+0000)
Dec 09 16:44:31 compute-0 ceph-osd[87055]: do_command 'log dump' '{prefix=log dump}'

● ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.2.service - Ceph osd.2 for 67f67f44-54fc-54ea-8df0-10931b6ecdaf
     Loaded: loaded (/etc/systemd/system/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-09 16:04:21 UTC; 40min ago
   Main PID: 88095 (conmon)
         IO: 6.6M read, 1.3G written
      Tasks: 61 (limit: 48628)
     Memory: 477.6M (peak: 511.3M)
        CPU: 17.515s
     CGroup: /system.slice/system-ceph\x2d67f67f44\x2d54fc\x2d54ea\x2d8df0\x2d10931b6ecdaf.slice/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.2.service
             ├─libpod-payload-2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78
             │ ├─88097 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ └─88099 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             └─runtime
               └─88095 /usr/bin/conmon --api-version 1 -c 2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78 -u 2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78/userdata -p /run/containers/storage/overlay-containers/2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78

Dec 09 16:44:26 compute-0 ceph-osd[88099]: do_command 'counter schema' '{prefix=counter schema}' result is 0 bytes
Dec 09 16:44:26 compute-0 ceph-osd[88099]: monclient: tick
Dec 09 16:44:26 compute-0 ceph-osd[88099]: monclient: _check_auth_tickets
Dec 09 16:44:26 compute-0 ceph-osd[88099]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-12-09T16:43:54.295182+0000)
Dec 09 16:44:26 compute-0 ceph-osd[88099]: prioritycache tune_memory target: 4294967296 mapped: 82493440 unmapped: 19144704 heap: 101638144 old mem: 2845415832 new mem: 2845415832
Dec 09 16:44:26 compute-0 ceph-osd[88099]: monclient: tick
Dec 09 16:44:26 compute-0 ceph-osd[88099]: monclient: _check_auth_tickets
Dec 09 16:44:26 compute-0 ceph-osd[88099]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2025-12-09T16:43:55.295291+0000)
Dec 09 16:44:26 compute-0 ceph-osd[88099]: prioritycache tune_memory target: 4294967296 mapped: 82780160 unmapped: 18857984 heap: 101638144 old mem: 2845415832 new mem: 2845415832
Dec 09 16:44:26 compute-0 ceph-osd[88099]: do_command 'log dump' '{prefix=log dump}'

● ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@rgw.rgw.compute-0.efuxpz.service - Ceph rgw.rgw.compute-0.efuxpz for 67f67f44-54fc-54ea-8df0-10931b6ecdaf
     Loaded: loaded (/etc/systemd/system/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-09 16:05:06 UTC; 39min ago
   Main PID: 94929 (conmon)
         IO: 0B read, 196.0K written
      Tasks: 614 (limit: 48628)
     Memory: 105.6M (peak: 106.2M)
        CPU: 15.010s
     CGroup: /system.slice/system-ceph\x2d67f67f44\x2d54fc\x2d54ea\x2d8df0\x2d10931b6ecdaf.slice/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@rgw.rgw.compute-0.efuxpz.service
             ├─libpod-payload-7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d
             │ ├─94931 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.efuxpz -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
             │ └─94933 /usr/bin/radosgw -n client.rgw.rgw.compute-0.efuxpz -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
             └─runtime
               └─94929 /usr/bin/conmon --api-version 1 -c 7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d -u 7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d/userdata -p /run/containers/storage/overlay-containers/7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-rgw-rgw-compute-0-efuxpz --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@rgw.rgw.compute-0.efuxpz.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d

Dec 09 16:05:06 compute-0 radosgw[94933]: framework conf key: endpoint, val: 192.168.122.100:8082
Dec 09 16:05:06 compute-0 radosgw[94933]: init_numa not setting numa affinity
Dec 09 16:05:15 compute-0 radosgw[94933]: v1 topic migration: starting v1 topic migration..
Dec 09 16:05:15 compute-0 radosgw[94933]: v1 topic migration: finished v1 topic migration
Dec 09 16:05:16 compute-0 radosgw[94933]: framework: beast
Dec 09 16:05:16 compute-0 radosgw[94933]: framework conf key: ssl_certificate, val: config://rgw/cert/$realm/$zone.crt
Dec 09 16:05:16 compute-0 radosgw[94933]: framework conf key: ssl_private_key, val: config://rgw/cert/$realm/$zone.key
Dec 09 16:05:16 compute-0 radosgw[94933]: starting handler: beast
Dec 09 16:05:16 compute-0 radosgw[94933]: set uid:gid to 167:167 (ceph:ceph)
Dec 09 16:05:16 compute-0 radosgw[94933]: mgrc service_daemon_register rgw.14260 metadata {arch=x86_64,ceph_release=tentacle,ceph_version=ceph version 20.2.0 (69f84cc2651aa259a15bc192ddaabd3baba07489) tentacle (stable - RelWithDebInfo),ceph_version_short=20.2.0,container_hostname=compute-0,container_image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86,cpu=AMD EPYC-Rome Processor,distro=centos,distro_description=CentOS Stream 9,distro_version=9,frontend_config#0=beast endpoint=192.168.122.100:8082,frontend_type#0=beast,hostname=compute-0,id=rgw.compute-0.efuxpz,kernel_description=#1 SMP PREEMPT_DYNAMIC Fri Dec 5 11:18:23 UTC 2025,kernel_version=5.14.0-648.el9.x86_64,mem_swap_kb=1048572,mem_total_kb=7864300,num_handles=1,os=Linux,pid=2,realm_id=,realm_name=,zone_id=483fcdb3-bcab-4288-b81d-feaf7f34b01d,zone_name=default,zonegroup_id=867d5d1c-b402-423a-949d-5103c0b25b35,zonegroup_name=default}

● ceph-osd-losetup-0.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-0.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-12-09 16:02:07 UTC; 42min ago
   Main PID: 72567 (code=exited, status=0/SUCCESS)
        CPU: 23ms

Dec 09 16:02:07 compute-0 systemd[1]: Starting Ceph OSD losetup...
Dec 09 16:02:07 compute-0 bash[72568]: /dev/loop3: [64513]:4327748 (/var/lib/ceph-osd-0.img)
Dec 09 16:02:07 compute-0 systemd[1]: Finished Ceph OSD losetup.

● ceph-osd-losetup-1.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-1.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-12-09 16:02:12 UTC; 42min ago
   Main PID: 72936 (code=exited, status=0/SUCCESS)
        CPU: 22ms

Dec 09 16:02:12 compute-0 systemd[1]: Starting Ceph OSD losetup...
Dec 09 16:02:12 compute-0 bash[72937]: /dev/loop4: [64513]:4327913 (/var/lib/ceph-osd-1.img)
Dec 09 16:02:12 compute-0 systemd[1]: Finished Ceph OSD losetup.

● ceph-osd-losetup-2.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-2.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-12-09 16:02:18 UTC; 42min ago
   Main PID: 73304 (code=exited, status=0/SUCCESS)
        CPU: 20ms

Dec 09 16:02:18 compute-0 systemd[1]: Starting Ceph OSD losetup...
Dec 09 16:02:18 compute-0 bash[73305]: /dev/loop5: [64513]:4327775 (/var/lib/ceph-osd-2.img)
Dec 09 16:02:18 compute-0 systemd[1]: Finished Ceph OSD losetup.

● chronyd.service - NTP client/server
     Loaded: loaded (/usr/lib/systemd/system/chronyd.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-09 16:00:03 UTC; 44min ago
       Docs: man:chronyd(8)
             man:chrony.conf(5)
   Main PID: 58592 (chronyd)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48628)
     Memory: 1016.0K (peak: 1.7M)
        CPU: 74ms
     CGroup: /system.slice/chronyd.service
             └─58592 /usr/sbin/chronyd -F 2

Dec 09 16:00:03 compute-0 systemd[1]: Starting NTP client/server...
Dec 09 16:00:03 compute-0 chronyd[58592]: chronyd version 4.8 starting (+CMDMON +REFCLOCK +RTC +PRIVDROP +SCFILTER +SIGND +NTS +SECHASH +IPV6 +DEBUG)
Dec 09 16:00:03 compute-0 chronyd[58592]: Frequency -28.511 +/- 0.176 ppm read from /var/lib/chrony/drift
Dec 09 16:00:03 compute-0 chronyd[58592]: Loaded seccomp filter (level 2)
Dec 09 16:00:03 compute-0 systemd[1]: Started NTP client/server.
Dec 09 16:02:12 compute-0 chronyd[58592]: Selected source 162.159.200.1 (pool.ntp.org)

● cloud-config.service - Cloud-init: Config Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-12-09 14:51:40 UTC; 1h 53min ago
   Main PID: 1001 (code=exited, status=0/SUCCESS)
        CPU: 407ms

Dec 09 14:51:40 np0005552052.novalocal systemd[1]: Starting Cloud-init: Config Stage...
Dec 09 14:51:40 np0005552052.novalocal cloud-init[1158]: Cloud-init v. 24.4-7.el9 running 'modules:config' at Tue, 09 Dec 2025 14:51:40 +0000. Up 8.35 seconds.
Dec 09 14:51:40 np0005552052.novalocal systemd[1]: Finished Cloud-init: Config Stage.

● cloud-final.service - Cloud-init: Final Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-final.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-12-09 14:51:41 UTC; 1h 53min ago
   Main PID: 1211 (code=exited, status=0/SUCCESS)
        CPU: 494ms

Dec 09 14:51:41 np0005552052.novalocal cloud-init[1300]: Cloud-init v. 24.4-7.el9 running 'modules:final' at Tue, 09 Dec 2025 14:51:41 +0000. Up 8.75 seconds.
Dec 09 14:51:41 np0005552052.novalocal cloud-init[1329]: #############################################################
Dec 09 14:51:41 np0005552052.novalocal cloud-init[1332]: -----BEGIN SSH HOST KEY FINGERPRINTS-----
Dec 09 14:51:41 np0005552052.novalocal cloud-init[1342]: 256 SHA256:1x9cVQZEoiG9cu6SuiQliONsIAz0lelL+dGFmuy71lo root@np0005552052.novalocal (ECDSA)
Dec 09 14:51:41 np0005552052.novalocal cloud-init[1350]: 256 SHA256:pbb1NbO1gtH2iQ4dZsIkWnt6P98u0xQnv3UehpNgdko root@np0005552052.novalocal (ED25519)
Dec 09 14:51:41 np0005552052.novalocal cloud-init[1357]: 3072 SHA256:415Ua7xEXdNHP27rRiR8kr03ibvCqLtdYB7WCNOuSuM root@np0005552052.novalocal (RSA)
Dec 09 14:51:41 np0005552052.novalocal cloud-init[1359]: -----END SSH HOST KEY FINGERPRINTS-----
Dec 09 14:51:41 np0005552052.novalocal cloud-init[1362]: #############################################################
Dec 09 14:51:41 np0005552052.novalocal cloud-init[1300]: Cloud-init v. 24.4-7.el9 finished at Tue, 09 Dec 2025 14:51:41 +0000. Datasource DataSourceConfigDrive [net,ver=2][source=/dev/sr0].  Up 8.93 seconds
Dec 09 14:51:41 np0005552052.novalocal systemd[1]: Finished Cloud-init: Final Stage.

● cloud-init-local.service - Cloud-init: Local Stage (pre-network)
     Loaded: loaded (/usr/lib/systemd/system/cloud-init-local.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-12-09 14:51:38 UTC; 1h 53min ago
   Main PID: 775 (code=exited, status=0/SUCCESS)
        CPU: 747ms

Dec 09 14:51:37 localhost systemd[1]: Starting Cloud-init: Local Stage (pre-network)...
Dec 09 14:51:38 localhost cloud-init[837]: Cloud-init v. 24.4-7.el9 running 'init-local' at Tue, 09 Dec 2025 14:51:38 +0000. Up 5.96 seconds.
Dec 09 14:51:38 np0005552052.novalocal systemd[1]: Finished Cloud-init: Local Stage (pre-network).

● cloud-init.service - Cloud-init: Network Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-12-09 14:51:40 UTC; 1h 53min ago
   Main PID: 886 (code=exited, status=0/SUCCESS)
        CPU: 1.022s

Dec 09 14:51:40 np0005552052.novalocal cloud-init[921]: |                 |
Dec 09 14:51:40 np0005552052.novalocal cloud-init[921]: |         o..     |
Dec 09 14:51:40 np0005552052.novalocal cloud-init[921]: |        oo=E.....|
Dec 09 14:51:40 np0005552052.novalocal cloud-init[921]: |       .S.==+**o+|
Dec 09 14:51:40 np0005552052.novalocal cloud-init[921]: |       . oooO=+BB|
Dec 09 14:51:40 np0005552052.novalocal cloud-init[921]: |        .. +.++B=|
Dec 09 14:51:40 np0005552052.novalocal cloud-init[921]: |          . +.+.+|
Dec 09 14:51:40 np0005552052.novalocal cloud-init[921]: |             oo=o|
Dec 09 14:51:40 np0005552052.novalocal cloud-init[921]: +----[SHA256]-----+
Dec 09 14:51:40 np0005552052.novalocal systemd[1]: Finished Cloud-init: Network Stage.

○ cpupower.service - Configure CPU power related settings
     Loaded: loaded (/usr/lib/systemd/system/cpupower.service; disabled; preset: disabled)
     Active: inactive (dead)

● crond.service - Command Scheduler
     Loaded: loaded (/usr/lib/systemd/system/crond.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-09 14:51:40 UTC; 1h 53min ago
   Main PID: 1008 (crond)
         IO: 36.0K read, 12.0K written
      Tasks: 1 (limit: 48628)
     Memory: 1.3M (peak: 4.9M)
        CPU: 159ms
     CGroup: /system.slice/crond.service
             └─1008 /usr/sbin/crond -n

Dec 09 15:40:01 compute-0 anacron[4310]: Job `cron.daily' started
Dec 09 15:40:01 compute-0 anacron[4310]: Job `cron.daily' terminated
Dec 09 16:00:01 compute-0 anacron[4310]: Job `cron.weekly' started
Dec 09 16:00:01 compute-0 anacron[4310]: Job `cron.weekly' terminated
Dec 09 16:01:01 compute-0 CROND[67033]: (root) CMD (run-parts /etc/cron.hourly)
Dec 09 16:01:01 compute-0 run-parts[67050]: (/etc/cron.hourly) finished 0anacron
Dec 09 16:01:01 compute-0 CROND[67032]: (root) CMDEND (run-parts /etc/cron.hourly)
Dec 09 16:20:01 compute-0 anacron[4310]: Job `cron.monthly' started
Dec 09 16:20:01 compute-0 anacron[4310]: Job `cron.monthly' terminated
Dec 09 16:20:01 compute-0 anacron[4310]: Normal exit (3 jobs run)

● dbus-broker.service - D-Bus System Message Bus
     Loaded: loaded (/usr/lib/systemd/system/dbus-broker.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
TriggeredBy: ● dbus.socket
       Docs: man:dbus-broker-launch(1)
   Main PID: 750 (dbus-broker-lau)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48628)
     Memory: 2.9M (peak: 3.7M)
        CPU: 6.453s
     CGroup: /system.slice/dbus-broker.service
             ├─750 /usr/bin/dbus-broker-launch --scope system --audit
             └─772 dbus-broker --log 4 --controller 9 --machine-id 4d4ef2323cc3337bbfd9081b2a323b4e --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit

Unit display-manager.service could not be found.

Dec 09 15:57:41 compute-0 dbus-broker-launch[750]: Noticed file-system modification, trigger reload.
Dec 09 15:58:24 compute-0 dbus-broker-launch[772]: avc:  op=load_policy lsm=selinux seqno=9 res=1
Dec 09 15:58:33 compute-0 dbus-broker-launch[772]: avc:  op=load_policy lsm=selinux seqno=10 res=1
Dec 09 16:12:11 compute-0 dbus-broker-launch[772]: avc:  op=load_policy lsm=selinux seqno=11 res=1
Dec 09 16:15:43 compute-0 dbus-broker-launch[772]: avc:  op=load_policy lsm=selinux seqno=13 res=1
Dec 09 16:16:28 compute-0 dbus-broker-launch[750]: Noticed file-system modification, trigger reload.
Dec 09 16:16:28 compute-0 dbus-broker-launch[772]: avc:  op=load_policy lsm=selinux seqno=14 res=1
Dec 09 16:16:28 compute-0 dbus-broker-launch[750]: Noticed file-system modification, trigger reload.
Dec 09 16:17:54 compute-0 dbus-broker-launch[772]: avc:  op=load_policy lsm=selinux seqno=15 res=1
Dec 09 16:37:08 compute-0 dbus-broker-launch[772]: avc:  op=setenforce lsm=selinux enforcing=0 res=1

○ dm-event.service - Device-mapper event daemon
     Loaded: loaded (/usr/lib/systemd/system/dm-event.service; static)
     Active: inactive (dead)
TriggeredBy: ● dm-event.socket
       Docs: man:dmeventd(8)

○ dnf-makecache.service - dnf makecache
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.service; static)
     Active: inactive (dead) since Tue 2025-12-09 15:38:02 UTC; 1h 6min ago
TriggeredBy: ● dnf-makecache.timer
   Main PID: 9112 (code=exited, status=0/SUCCESS)
        CPU: 700ms

Dec 09 15:38:01 np0005552052.novalocal systemd[1]: Starting dnf makecache...
Dec 09 15:38:01 np0005552052.novalocal dnf[9112]: Failed determining last makecache time.
Dec 09 15:38:01 np0005552052.novalocal dnf[9112]: CentOS Stream 9 - BaseOS                         61 kB/s | 6.4 kB     00:00
Dec 09 15:38:02 np0005552052.novalocal dnf[9112]: CentOS Stream 9 - AppStream                      29 kB/s | 7.1 kB     00:00
Dec 09 15:38:02 np0005552052.novalocal dnf[9112]: CentOS Stream 9 - CRB                            64 kB/s | 6.3 kB     00:00
Dec 09 15:38:02 np0005552052.novalocal dnf[9112]: CentOS Stream 9 - Extras packages                69 kB/s | 8.3 kB     00:00
Dec 09 15:38:02 np0005552052.novalocal dnf[9112]: Metadata cache created.
Dec 09 15:38:02 np0005552052.novalocal systemd[1]: dnf-makecache.service: Deactivated successfully.
Dec 09 15:38:02 np0005552052.novalocal systemd[1]: Finished dnf makecache.

○ dracut-cmdline.service - dracut cmdline hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-cmdline.service; static)
     Active: inactive (dead) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
   Duration: 1.810s
       Docs: man:dracut-cmdline.service(8)
             man:dracut.bootup(7)
   Main PID: 329 (code=exited, status=0/SUCCESS)
        CPU: 131ms

Dec 09 14:51:34 localhost systemd[1]: Starting dracut cmdline hook...
Dec 09 14:51:34 localhost dracut-cmdline[329]: dracut-9 dracut-057-102.git20250818.el9
Dec 09 14:51:34 localhost dracut-cmdline[329]: Using kernel command line parameters:    BOOT_IMAGE=(hd0,msdos1)/boot/vmlinuz-5.14.0-648.el9.x86_64 root=UUID=fcf6b761-831a-48a7-9f5f-068b5063763f ro console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=1G-2G:192M,2G-64G:256M,64G-:512M
Dec 09 14:51:34 localhost systemd[1]: Finished dracut cmdline hook.
Dec 09 14:51:36 localhost systemd[1]: dracut-cmdline.service: Deactivated successfully.
Dec 09 14:51:36 localhost systemd[1]: Stopped dracut cmdline hook.

○ dracut-initqueue.service - dracut initqueue hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-initqueue.service; static)
     Active: inactive (dead) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
   Duration: 814ms
       Docs: man:dracut-initqueue.service(8)
             man:dracut.bootup(7)
   Main PID: 505 (code=exited, status=0/SUCCESS)
        CPU: 40ms

Dec 09 14:51:34 localhost systemd[1]: Starting dracut initqueue hook...
Dec 09 14:51:35 localhost systemd[1]: Finished dracut initqueue hook.
Dec 09 14:51:36 localhost systemd[1]: dracut-initqueue.service: Deactivated successfully.
Dec 09 14:51:36 localhost systemd[1]: Stopped dracut initqueue hook.

○ dracut-mount.service - dracut mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-mount.service; static)
     Active: inactive (dead) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
   Duration: 138ms
       Docs: man:dracut-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 568 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Dec 09 14:51:36 localhost systemd[1]: Starting dracut mount hook...
Dec 09 14:51:36 localhost systemd[1]: Finished dracut mount hook.
Dec 09 14:51:36 localhost systemd[1]: dracut-mount.service: Deactivated successfully.
Dec 09 14:51:36 localhost systemd[1]: Stopped dracut mount hook.

○ dracut-pre-mount.service - dracut pre-mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-mount.service; static)
     Active: inactive (dead) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
   Duration: 760ms
       Docs: man:dracut-pre-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 546 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Dec 09 14:51:35 localhost systemd[1]: Starting dracut pre-mount hook...
Dec 09 14:51:35 localhost systemd[1]: Finished dracut pre-mount hook.
Dec 09 14:51:36 localhost systemd[1]: dracut-pre-mount.service: Deactivated successfully.
Dec 09 14:51:36 localhost systemd[1]: Stopped dracut pre-mount hook.

○ dracut-pre-pivot.service - dracut pre-pivot and cleanup hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-pivot.service; static)
     Active: inactive (dead) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
   Duration: 30ms
       Docs: man:dracut-pre-pivot.service(8)
             man:dracut.bootup(7)
   Main PID: 573 (code=exited, status=0/SUCCESS)
        CPU: 87ms

Dec 09 14:51:36 localhost systemd[1]: Starting dracut pre-pivot and cleanup hook...
Dec 09 14:51:36 localhost systemd[1]: Finished dracut pre-pivot and cleanup hook.
Dec 09 14:51:36 localhost systemd[1]: dracut-pre-pivot.service: Deactivated successfully.
Dec 09 14:51:36 localhost systemd[1]: Stopped dracut pre-pivot and cleanup hook.

○ dracut-pre-trigger.service - dracut pre-trigger hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-trigger.service; static)
     Active: inactive (dead) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
   Duration: 1.383s
       Docs: man:dracut-pre-trigger.service(8)
             man:dracut.bootup(7)
   Main PID: 470 (code=exited, status=0/SUCCESS)
        CPU: 27ms

Dec 09 14:51:34 localhost systemd[1]: Starting dracut pre-trigger hook...
Dec 09 14:51:34 localhost systemd[1]: Finished dracut pre-trigger hook.
Dec 09 14:51:36 localhost systemd[1]: dracut-pre-trigger.service: Deactivated successfully.
Dec 09 14:51:36 localhost systemd[1]: Stopped dracut pre-trigger hook.

○ dracut-pre-udev.service - dracut pre-udev hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-udev.service; static)
     Active: inactive (dead) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
   Duration: 1.515s
       Docs: man:dracut-pre-udev.service(8)
             man:dracut.bootup(7)
   Main PID: 418 (code=exited, status=0/SUCCESS)
        CPU: 279ms

Dec 09 14:51:34 localhost systemd[1]: Starting dracut pre-udev hook...
Dec 09 14:51:34 localhost rpc.statd[447]: Version 2.5.4 starting
Dec 09 14:51:34 localhost rpc.statd[447]: Initializing NSM state
Dec 09 14:51:34 localhost rpc.idmapd[452]: Setting log level to 0
Dec 09 14:51:34 localhost systemd[1]: Finished dracut pre-udev hook.
Dec 09 14:51:36 localhost rpc.idmapd[452]: exiting on signal 15
Dec 09 14:51:36 localhost systemd[1]: dracut-pre-udev.service: Deactivated successfully.
Dec 09 14:51:36 localhost systemd[1]: Stopped dracut pre-udev hook.

○ dracut-shutdown-onfailure.service - Service executing upon dracut-shutdown failure to perform cleanup
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown-onfailure.service; static)
     Active: inactive (dead)
       Docs: man:dracut-shutdown.service(8)

● dracut-shutdown.service - Restore /run/initramfs on shutdown
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown.service; static)
     Active: active (exited) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:dracut-shutdown.service(8)
   Main PID: 776 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Dec 09 14:51:37 localhost systemd[1]: Starting Restore /run/initramfs on shutdown...
Dec 09 14:51:37 localhost systemd[1]: Finished Restore /run/initramfs on shutdown.

● edpm-container-shutdown.service - EDPM Container Shutdown
     Loaded: loaded (/etc/systemd/system/edpm-container-shutdown.service; enabled; preset: enabled)
     Active: active (exited) since Tue 2025-12-09 16:00:31 UTC; 44min ago
       Docs: https://github.com/openstack-k8s-operators/docs
   Main PID: 61589 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Dec 09 16:00:31 compute-0 systemd[1]: Starting EDPM Container Shutdown...
Dec 09 16:00:31 compute-0 systemd[1]: Finished EDPM Container Shutdown.

○ edpm_libvirt_guests.service - Suspend libvirt Guests in edpm
     Loaded: loaded (/etc/systemd/system/edpm_libvirt_guests.service; enabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:libvirtd(8)
             https://libvirt.org

● edpm_multipathd.service - multipathd container
     Loaded: loaded (/etc/systemd/system/edpm_multipathd.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-09 16:20:03 UTC; 24min ago
   Main PID: 225738 (conmon)
         IO: 0B read, 117.5K written
      Tasks: 1 (limit: 48628)
     Memory: 676.0K (peak: 17.4M)
        CPU: 115ms
     CGroup: /system.slice/edpm_multipathd.service
             └─225738 /usr/bin/conmon --api-version 1 -c 84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2 -u 84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2/userdata -p /run/containers/storage/overlay-containers/84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2/userdata/pidfile -n multipathd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2/userdata/oci-log --conmon-pidfile /run/multipathd.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only 
--exit-command-arg 84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2

Dec 09 16:20:03 compute-0 multipathd[225738]: + sudo kolla_copy_cacerts
Dec 09 16:20:03 compute-0 multipathd[225738]: + [[ ! -n '' ]]
Dec 09 16:20:03 compute-0 multipathd[225738]: + . kolla_extend_start
Dec 09 16:20:03 compute-0 multipathd[225738]: Running command: '/usr/sbin/multipathd -d'
Dec 09 16:20:03 compute-0 multipathd[225738]: + echo 'Running command: '\''/usr/sbin/multipathd -d'\'''
Dec 09 16:20:03 compute-0 multipathd[225738]: + umask 0022
Dec 09 16:20:03 compute-0 multipathd[225738]: + exec /usr/sbin/multipathd -d
Dec 09 16:20:03 compute-0 multipathd[225738]: 5311.636392 | --------start up--------
Dec 09 16:20:03 compute-0 multipathd[225738]: 5311.636418 | read /etc/multipath.conf
Dec 09 16:20:04 compute-0 multipathd[225738]: 5311.642660 | path checkers start up

● edpm_nova_compute.service - nova_compute container
     Loaded: loaded (/etc/systemd/system/edpm_nova_compute.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-09 16:21:49 UTC; 23min ago
   Main PID: 243452 (conmon)
         IO: 0B read, 87.0K written
      Tasks: 1 (limit: 48628)
     Memory: 676.0K (peak: 19.1M)
        CPU: 316ms
     CGroup: /system.slice/edpm_nova_compute.service
             └─243452 /usr/bin/conmon --api-version 1 -c 9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce -u 9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce/userdata -p /run/containers/storage/overlay-containers/9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce

Dec 09 16:44:12 compute-0 nova_compute[243452]: 2025-12-09 16:44:12.070 243461 DEBUG nova.compute.manager [None req-8ef5e864-f7dd-4e45-95e0-4532f2566626 - - - - - -] Didn't find any instances for network info cache update. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9944[00m
Dec 09 16:44:12 compute-0 nova_compute[243452]: 2025-12-09 16:44:12.071 243461 DEBUG oslo_service.periodic_task [None req-8ef5e864-f7dd-4e45-95e0-4532f2566626 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Dec 09 16:44:12 compute-0 nova_compute[243452]: 2025-12-09 16:44:12.071 243461 DEBUG nova.compute.manager [None req-8ef5e864-f7dd-4e45-95e0-4532f2566626 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477[00m
Dec 09 16:44:14 compute-0 nova_compute[243452]: 2025-12-09 16:44:14.064 243461 DEBUG oslo_service.periodic_task [None req-8ef5e864-f7dd-4e45-95e0-4532f2566626 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Dec 09 16:44:15 compute-0 nova_compute[243452]: 2025-12-09 16:44:15.054 243461 DEBUG oslo_service.periodic_task [None req-8ef5e864-f7dd-4e45-95e0-4532f2566626 - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Dec 09 16:44:15 compute-0 nova_compute[243452]: 2025-12-09 16:44:15.055 243461 DEBUG oslo_service.periodic_task [None req-8ef5e864-f7dd-4e45-95e0-4532f2566626 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Dec 09 16:44:16 compute-0 nova_compute[243452]: 2025-12-09 16:44:16.054 243461 DEBUG oslo_service.periodic_task [None req-8ef5e864-f7dd-4e45-95e0-4532f2566626 - - - - - -] Running periodic task ComputeManager._instance_usage_audit run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Dec 09 16:44:16 compute-0 nova_compute[243452]: 2025-12-09 16:44:16.054 243461 DEBUG oslo_service.periodic_task [None req-8ef5e864-f7dd-4e45-95e0-4532f2566626 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Dec 09 16:44:22 compute-0 nova_compute[243452]: 2025-12-09 16:44:22.047 243461 DEBUG oslo_service.periodic_task [None req-8ef5e864-f7dd-4e45-95e0-4532f2566626 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Dec 09 16:44:37 compute-0 nova_compute[243452]: 2025-12-09 16:44:37.308 243461 WARNING oslo.service.loopingcall [-] Function 'nova.servicegroup.drivers.db.DbDriver._report_state' run outlasted interval by 5.41 sec[00m

● edpm_ovn_controller.service - ovn_controller container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_controller.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-09 16:13:10 UTC; 31min ago
   Main PID: 145421 (conmon)
         IO: 0B read, 133.0K written
      Tasks: 1 (limit: 48628)
     Memory: 692.0K (peak: 18.2M)
        CPU: 273ms
     CGroup: /system.slice/edpm_ovn_controller.service
             └─145421 /usr/bin/conmon --api-version 1 -c 0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470 -u 0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470/userdata -p /run/containers/storage/overlay-containers/0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470

Dec 09 16:13:10 compute-0 ovn_controller[145421]: 2025-12-09T16:13:10Z|00023|features|INFO|OVS DB schema supports 4 flow table prefixes, our IDL supports: 4
Dec 09 16:13:10 compute-0 ovn_controller[145421]: 2025-12-09T16:13:10Z|00024|main|INFO|Setting flow table prefixes: ip_src, ip_dst, ipv6_src, ipv6_dst.
Dec 09 16:13:10 compute-0 ovn_controller[145421]: 2025-12-09T16:13:10Z|00001|pinctrl(ovn_pinctrl0)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connecting to switch
Dec 09 16:13:10 compute-0 ovn_controller[145421]: 2025-12-09T16:13:10Z|00001|statctrl(ovn_statctrl3)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connecting to switch
Dec 09 16:13:10 compute-0 ovn_controller[145421]: 2025-12-09T16:13:10Z|00002|rconn(ovn_pinctrl0)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connecting...
Dec 09 16:13:10 compute-0 ovn_controller[145421]: 2025-12-09T16:13:10Z|00002|rconn(ovn_statctrl3)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connecting...
Dec 09 16:13:10 compute-0 ovn_controller[145421]: 2025-12-09T16:13:10Z|00003|rconn(ovn_pinctrl0)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connected
Dec 09 16:13:10 compute-0 ovn_controller[145421]: 2025-12-09T16:13:10Z|00003|rconn(ovn_statctrl3)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connected
Dec 09 16:13:40 compute-0 ovn_controller[145421]: 2025-12-09T16:13:40Z|00025|memory|INFO|16256 kB peak resident set size after 29.7 seconds
Dec 09 16:13:40 compute-0 ovn_controller[145421]: 2025-12-09T16:13:40Z|00026|memory|INFO|idl-cells-OVN_Southbound:239 idl-cells-Open_vSwitch:528 ofctrl_desired_flow_usage-KB:5 ofctrl_installed_flow_usage-KB:4 ofctrl_sb_flow_ref_usage-KB:2

● edpm_ovn_metadata_agent.service - ovn_metadata_agent container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_metadata_agent.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-09 16:14:15 UTC; 30min ago
   Main PID: 155086 (conmon)
         IO: 16.0K read, 137.0K written
      Tasks: 1 (limit: 48628)
     Memory: 732.0K (peak: 19.7M)
        CPU: 348ms
     CGroup: /system.slice/edpm_ovn_metadata_agent.service
             └─155086 /usr/bin/conmon --api-version 1 -c fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692 -u fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692/userdata -p /run/containers/storage/overlay-containers/fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692

Dec 09 16:41:17 compute-0 ovn_metadata_agent[155086]: 2025-12-09 16:41:17.861 155091 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Dec 09 16:42:17 compute-0 ovn_metadata_agent[155086]: 2025-12-09 16:42:17.861 155091 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Dec 09 16:42:17 compute-0 ovn_metadata_agent[155086]: 2025-12-09 16:42:17.862 155091 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Dec 09 16:42:17 compute-0 ovn_metadata_agent[155086]: 2025-12-09 16:42:17.862 155091 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Dec 09 16:43:17 compute-0 ovn_metadata_agent[155086]: 2025-12-09 16:43:17.863 155091 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Dec 09 16:43:17 compute-0 ovn_metadata_agent[155086]: 2025-12-09 16:43:17.864 155091 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Dec 09 16:43:17 compute-0 ovn_metadata_agent[155086]: 2025-12-09 16:43:17.864 155091 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Dec 09 16:44:17 compute-0 ovn_metadata_agent[155086]: 2025-12-09 16:44:17.865 155091 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Dec 09 16:44:17 compute-0 ovn_metadata_agent[155086]: 2025-12-09 16:44:17.866 155091 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Dec 09 16:44:17 compute-0 ovn_metadata_agent[155086]: 2025-12-09 16:44:17.866 155091 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m

○ emergency.service - Emergency Shell
     Loaded: loaded (/usr/lib/systemd/system/emergency.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

○ fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692-413c4d33667cee78.service - /usr/bin/podman healthcheck run fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692
     Loaded: loaded (/run/systemd/transient/fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692-413c4d33667cee78.service; transient)
  Transient: yes
     Active: inactive (dead) since Tue 2025-12-09 16:44:27 UTC; 28s ago
   Duration: 91ms
TriggeredBy: ● fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692-413c4d33667cee78.timer
    Process: 264741 ExecStart=/usr/bin/podman healthcheck run fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692 (code=exited, status=0/SUCCESS)
   Main PID: 264741 (code=exited, status=0/SUCCESS)
        CPU: 100ms

Dec 09 16:44:27 compute-0 podman[264741]: 2025-12-09 16:44:27.631483359 +0000 UTC m=+0.066612270 container health_status fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:85729a662800e6b42ceb088545fed39a2ac58704b4a37fd540cdef3ebf9e59a2, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, config_id=ovn_metadata_agent, managed_by=edpm_ansible, org.label-schema.build-date=20251202, org.label-schema.license=GPLv2, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, container_name=ovn_metadata_agent, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:85729a662800e6b42ceb088545fed39a2ac58704b4a37fd540cdef3ebf9e59a2', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', 
'/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)

● getty@tty1.service - Getty on tty1
     Loaded: loaded (/usr/lib/systemd/system/getty@.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-09 14:51:40 UTC; 1h 53min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1009 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48628)
     Memory: 216.0K (peak: 440.0K)
        CPU: 13ms
     CGroup: /system.slice/system-getty.slice/getty@tty1.service
             └─1009 /sbin/agetty -o "-p -- \\u" --noclear - linux

Dec 09 14:51:40 np0005552052.novalocal systemd[1]: Started Getty on tty1.

● gssproxy.service - GSSAPI Proxy Daemon
     Loaded: loaded (/usr/lib/systemd/system/gssproxy.service; disabled; preset: disabled)
     Active: active (running) since Tue 2025-12-09 14:51:38 UTC; 1h 53min ago
   Main PID: 869 (gssproxy)
         IO: 0B read, 0B written
      Tasks: 6 (limit: 48628)
     Memory: 1.8M (peak: 3.2M)
         CPU: 19ms
Unit hv_kvp_daemon.service could not be found.
     CGroup: /system.slice/gssproxy.service
             └─869 /usr/sbin/gssproxy -D

Dec 09 14:51:38 np0005552052.novalocal systemd[1]: Starting GSSAPI Proxy Daemon...
Dec 09 14:51:38 np0005552052.novalocal systemd[1]: Started GSSAPI Proxy Daemon.

○ import-state.service - Import network configuration from initramfs
     Loaded: loaded (/usr/lib/systemd/system/import-state.service; enabled; preset: enabled)
     Active: inactive (dead)

○ initrd-cleanup.service - Cleaning Up and Shutting Down Daemons
     Loaded: loaded (/usr/lib/systemd/system/initrd-cleanup.service; static)
     Active: inactive (dead) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
   Main PID: 616 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Dec 09 14:51:36 localhost systemd[1]: Starting Cleaning Up and Shutting Down Daemons...
Dec 09 14:51:36 localhost systemd[1]: initrd-cleanup.service: Deactivated successfully.
Dec 09 14:51:36 localhost systemd[1]: Finished Cleaning Up and Shutting Down Daemons.

○ initrd-parse-etc.service - Mountpoints Configured in the Real Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-parse-etc.service; static)
     Active: inactive (dead) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
   Main PID: 567 (code=exited, status=0/SUCCESS)
        CPU: 15ms

Dec 09 14:51:36 localhost systemd[1]: Starting Mountpoints Configured in the Real Root...
Dec 09 14:51:36 localhost systemd[1]: initrd-parse-etc.service: Deactivated successfully.
Dec 09 14:51:36 localhost systemd[1]: Finished Mountpoints Configured in the Real Root.

○ initrd-switch-root.service - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.service; static)
     Active: inactive (dead) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
   Main PID: 619 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Dec 09 14:51:36 localhost systemd[1]: Starting Switch Root...

○ initrd-udevadm-cleanup-db.service - Cleanup udev Database
     Loaded: loaded (/usr/lib/systemd/system/initrd-udevadm-cleanup-db.service; static)
     Active: inactive (dead) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
   Main PID: 618 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Dec 09 14:51:36 localhost systemd[1]: Starting Cleanup udev Database...
Dec 09 14:51:36 localhost systemd[1]: initrd-udevadm-cleanup-db.service: Deactivated successfully.
Dec 09 14:51:36 localhost systemd[1]: Finished Cleanup udev Database.

○ ip6tables.service - IPv6 firewall with ip6tables
     Loaded: loaded (/usr/lib/systemd/system/ip6tables.service; disabled; preset: disabled)
     Active: inactive (dead)

○ iptables.service - IPv4 firewall with iptables
     Loaded: loaded (/usr/lib/systemd/system/iptables.service; disabled; preset: disabled)
     Active: inactive (dead) since Tue 2025-12-09 16:00:40 UTC; 44min ago
   Duration: 1h 9min 2.347s
   Main PID: 778 (code=exited, status=0/SUCCESS)
        CPU: 121ms

Dec 09 14:51:37 localhost systemd[1]: Starting IPv4 firewall with iptables...
Dec 09 14:51:38 localhost iptables.init[778]: iptables: Applying firewall rules: [  OK  ]
Dec 09 14:51:38 localhost systemd[1]: Finished IPv4 firewall with iptables.
Dec 09 16:00:40 compute-0 systemd[1]: Stopping IPv4 firewall with iptables...
Dec 09 16:00:40 compute-0 iptables.init[62837]: iptables: Setting chains to policy ACCEPT: raw mangle filter nat [  OK  ]
Dec 09 16:00:40 compute-0 iptables.init[62837]: iptables: Flushing firewall rules: [  OK  ]
Dec 09 16:00:40 compute-0 systemd[1]: iptables.service: Deactivated successfully.
Dec 09 16:00:40 compute-0 systemd[1]: Stopped IPv4 firewall with iptables.

● irqbalance.service - irqbalance daemon
     Loaded: loaded (/usr/lib/systemd/system/irqbalance.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:irqbalance(1)
             https://github.com/Irqbalance/irqbalance
   Main PID: 780 (irqbalance)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48628)
     Memory: 1.1M (peak: 1.5M)
        CPU: 512ms
     CGroup: /system.slice/irqbalance.service
             └─780 /usr/sbin/irqbalance

Dec 09 14:51:48 np0005552052.novalocal irqbalance[780]: Cannot change IRQ 32 affinity: Operation not permitted
Dec 09 14:51:48 np0005552052.novalocal irqbalance[780]: IRQ 32 affinity is now unmanaged
Dec 09 14:51:48 np0005552052.novalocal irqbalance[780]: Cannot change IRQ 30 affinity: Operation not permitted
Dec 09 14:51:48 np0005552052.novalocal irqbalance[780]: IRQ 30 affinity is now unmanaged
Dec 09 14:51:48 np0005552052.novalocal irqbalance[780]: Cannot change IRQ 29 affinity: Operation not permitted
Dec 09 14:51:48 np0005552052.novalocal irqbalance[780]: IRQ 29 affinity is now unmanaged
Dec 09 15:28:08 np0005552052.novalocal irqbalance[780]: Cannot change IRQ 26 affinity: Operation not permitted
Dec 09 15:28:08 np0005552052.novalocal irqbalance[780]: IRQ 26 affinity is now unmanaged
Dec 09 15:29:48 np0005552052.novalocal irqbalance[780]: Cannot change IRQ 27 affinity: Operation not permitted
Dec 09 15:29:48 np0005552052.novalocal irqbalance[780]: IRQ 27 affinity is now unmanaged

○ iscsi-init.service - One time configuration for iscsi.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-init.service; disabled; preset: disabled)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-09 16:20:15 UTC; 24min ago

Dec 09 16:19:11 compute-0 systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).
Dec 09 16:20:15 compute-0 systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).

○ iscsi-onboot.service - Special handling of early boot iSCSI sessions
     Loaded: loaded (/usr/lib/systemd/system/iscsi-onboot.service; enabled; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsi-shutdown.service - Logout off all iSCSI sessions on shutdown
     Loaded: loaded (/usr/lib/systemd/system/iscsi-shutdown.service; static)
     Active: active (exited) since Tue 2025-12-09 16:19:11 UTC; 25min ago
       Docs: man:iscsid(8)
             man:iscsiadm(8)
   Main PID: 216167 (code=exited, status=0/SUCCESS)
        CPU: 2ms

Dec 09 16:19:11 compute-0 systemd[1]: Starting Logout off all iSCSI sessions on shutdown...
Dec 09 16:19:11 compute-0 systemd[1]: Finished Logout off all iSCSI sessions on shutdown.

○ iscsi-starter.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-starter.service; enabled; preset: disabled)
     Active: inactive (dead)

○ iscsi.service - Login and scanning of iSCSI devices
     Loaded: loaded (/usr/lib/systemd/system/iscsi.service; indirect; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsid.service - Open-iSCSI
     Loaded: loaded (/usr/lib/systemd/system/iscsid.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-09 16:20:15 UTC; 24min ago
TriggeredBy: ● iscsid.socket
       Docs: man:iscsid(8)
             man:iscsiuio(8)
             man:iscsiadm(8)
   Main PID: 228459 (iscsid)
     Status: "Ready to process requests"
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48628)
     Memory: 1.9M (peak: 2.0M)
        CPU: 8ms
     CGroup: /system.slice/iscsid.service
             └─228459 /usr/sbin/iscsid -f

Dec 09 16:20:15 compute-0 systemd[1]: Starting Open-iSCSI...
Dec 09 16:20:15 compute-0 systemd[1]: Started Open-iSCSI.

○ iscsiuio.service - iSCSI UserSpace I/O driver
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ iscsiuio.socket
       Docs: man:iscsiuio(8)

● kdump.service - Crash recovery kernel arming
     Loaded: loaded (/usr/lib/systemd/system/kdump.service; enabled; preset: enabled)
     Active: active (exited) since Tue 2025-12-09 14:51:51 UTC; 1h 53min ago
   Main PID: 1007 (code=exited, status=0/SUCCESS)
        CPU: 16.946s

Dec 09 14:51:50 np0005552052.novalocal dracut[1284]: Linked:         0 files
Dec 09 14:51:50 np0005552052.novalocal dracut[1284]: Compared:       0 xattrs
Dec 09 14:51:50 np0005552052.novalocal dracut[1284]: Compared:       0 files
Dec 09 14:51:50 np0005552052.novalocal dracut[1284]: Saved:          0 B
Dec 09 14:51:50 np0005552052.novalocal dracut[1284]: Duration:       0.000504 seconds
Dec 09 14:51:50 np0005552052.novalocal dracut[1284]: *** Hardlinking files done ***
Dec 09 14:51:50 np0005552052.novalocal dracut[1284]: *** Creating initramfs image file '/boot/initramfs-5.14.0-648.el9.x86_64kdump.img' done ***
Dec 09 14:51:51 np0005552052.novalocal kdumpctl[1013]: kdump: kexec: loaded kdump kernel
Dec 09 14:51:51 np0005552052.novalocal kdumpctl[1013]: kdump: Starting kdump: [OK]
Dec 09 14:51:51 np0005552052.novalocal systemd[1]: Finished Crash recovery kernel arming.

● kmod-static-nodes.service - Create List of Static Device Nodes
     Loaded: loaded (/usr/lib/systemd/system/kmod-static-nodes.service; static)
     Active: active (exited) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
   Main PID: 669 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Dec 09 14:51:37 localhost systemd[1]: Finished Create List of Static Device Nodes.

● ldconfig.service - Rebuild Dynamic Linker Cache
     Loaded: loaded (/usr/lib/systemd/system/ldconfig.service; static)
     Active: active (exited) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:ldconfig(8)
   Main PID: 692 (code=exited, status=0/SUCCESS)
        CPU: 53ms

Dec 09 14:51:37 localhost systemd[1]: Starting Rebuild Dynamic Linker Cache...
Dec 09 14:51:37 localhost systemd[1]: Finished Rebuild Dynamic Linker Cache.

○ libvirtd.service
     Loaded: masked (Reason: Unit libvirtd.service is masked.)
     Active: inactive (dead)
TriggeredBy: ○ libvirtd-admin.socket
             ○ libvirtd-ro.socket
             ○ libvirtd.socket

○ loadmodules.service - Load legacy module configuration
     Loaded: loaded (/usr/lib/systemd/system/loadmodules.service; enabled; preset: enabled)
     Active: inactive (dead)

○ logrotate.service - Rotate log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.service; static)
     Active: inactive (dead)
TriggeredBy: ● logrotate.timer
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

○ lvm2-lvmpolld.service - LVM2 poll daemon
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.service; static)
     Active: inactive (dead)
TriggeredBy: ● lvm2-lvmpolld.socket
       Docs: man:lvmpolld(8)

● lvm2-monitor.service - Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling
     Loaded: loaded (/usr/lib/systemd/system/lvm2-monitor.service; enabled; preset: enabled)
     Active: active (exited) since Tue 2025-12-09 15:55:53 UTC; 49min ago
       Docs: man:dmeventd(8)
             man:lvcreate(8)
             man:lvchange(8)
             man:vgchange(8)
   Main PID: 34119 (code=exited, status=0/SUCCESS)
        CPU: 26ms

Dec 09 15:55:53 compute-0 systemd[1]: Starting Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling...
Dec 09 15:55:53 compute-0 systemd[1]: Finished Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling.

○ mdmonitor.service - Software RAID monitoring and management
     Loaded: loaded (/usr/lib/systemd/system/mdmonitor.service; enabled; preset: enabled)
     Active: inactive (dead)

○ microcode.service - Load CPU microcode update
     Loaded: loaded (/usr/lib/systemd/system/microcode.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-09 14:51:37 UTC; 1h 53min ago

Dec 09 14:51:37 localhost systemd[1]: Load CPU microcode update was skipped because of an unmet condition check (ConditionPathExists=/sys/devices/system/cpu/microcode/reload).

○ modprobe@configfs.service - Load Kernel Module configfs
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:modprobe(8)
   Main PID: 767 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Dec 09 14:51:37 localhost systemd[1]: Starting Load Kernel Module configfs...
Dec 09 14:51:37 localhost systemd[1]: modprobe@configfs.service: Deactivated successfully.
Dec 09 14:51:37 localhost systemd[1]: Finished Load Kernel Module configfs.

○ modprobe@drm.service - Load Kernel Module drm
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:modprobe(8)
   Main PID: 671 (code=exited, status=0/SUCCESS)
        CPU: 106ms

Dec 09 14:51:37 localhost systemd[1]: modprobe@drm.service: Deactivated successfully.
Dec 09 14:51:37 localhost systemd[1]: Finished Load Kernel Module drm.

○ modprobe@efi_pstore.service - Load Kernel Module efi_pstore
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:modprobe(8)
   Main PID: 672 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Dec 09 14:51:37 localhost systemd[1]: modprobe@efi_pstore.service: Deactivated successfully.
Dec 09 14:51:37 localhost systemd[1]: Finished Load Kernel Module efi_pstore.

○ modprobe@fuse.service - Load Kernel Module fuse
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:modprobe(8)
   Main PID: 673 (code=exited, status=0/SUCCESS)
        CPU: 33ms

Dec 09 14:51:37 localhost systemd[1]: modprobe@fuse.service: Deactivated successfully.
Dec 09 14:51:37 localhost systemd[1]: Finished Load Kernel Module fuse.

○ netns-placeholder.service - Create netns directory
     Loaded: loaded (/etc/systemd/system/netns-placeholder.service; enabled; preset: enabled)
     Active: inactive (dead) since Tue 2025-12-09 16:19:42 UTC; 25min ago
   Main PID: 222173 (code=exited, status=0/SUCCESS)
        CPU: 11ms

Dec 09 16:19:42 compute-0 systemd[1]: Starting Create netns directory...
Dec 09 16:19:42 compute-0 systemd[1]: netns-placeholder.service: Deactivated successfully.
Dec 09 16:19:42 compute-0 systemd[1]: Finished Create netns directory.

○ network.service - LSB: Bring up/down networking
     Loaded: loaded (/etc/rc.d/init.d/network; generated)
     Active: inactive (dead)
       Docs: man:systemd-sysv-generator(8)

● NetworkManager-wait-online.service - Network Manager Wait Online
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager-wait-online.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-12-09 15:58:42 UTC; 46min ago
       Docs: man:NetworkManager-wait-online.service(8)
   Main PID: 49039 (code=exited, status=0/SUCCESS)
        CPU: 23ms

Dec 09 15:58:42 compute-0 systemd[1]: Starting Network Manager Wait Online...
Dec 09 15:58:42 compute-0 systemd[1]: Finished Network Manager Wait Online.

● NetworkManager.service - Network Manager
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager.service; enabled; preset: enabled)
    Drop-In: /usr/lib/systemd/system/NetworkManager.service.d
             └─NetworkManager-ovs.conf
     Active: active (running) since Tue 2025-12-09 15:58:42 UTC; 46min ago
       Docs: man:NetworkManager(8)
   Main PID: 49021 (NetworkManager)
         IO: 104.0K read, 286.5K written
      Tasks: 3 (limit: 48628)
     Memory: 5.3M (peak: 6.7M)
        CPU: 20.027s
     CGroup: /system.slice/NetworkManager.service
             └─49021 /usr/sbin/NetworkManager --no-daemon

Dec 09 15:59:13 compute-0 systemd[1]: Reloaded Network Manager.
Dec 09 16:13:10 compute-0 NetworkManager[49021]: <info>  [1765296790.9036] manager: (br-int): new Open vSwitch Interface device (/org/freedesktop/NetworkManager/Devices/16)
Dec 09 16:13:10 compute-0 NetworkManager[49021]: <info>  [1765296790.9048] device (br-int)[Open vSwitch Interface]: state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external')
Unit ntpd.service could not be found.
Unit ntpdate.service could not be found.
Dec 09 16:13:10 compute-0 NetworkManager[49021]: <warn>  [1765296790.9051] device (br-int)[Open vSwitch Interface]: error setting IPv4 forwarding to '1': No such file or directory
Dec 09 16:13:10 compute-0 NetworkManager[49021]: <info>  [1765296790.9064] manager: (br-int): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/17)
Dec 09 16:13:10 compute-0 NetworkManager[49021]: <info>  [1765296790.9074] manager: (br-int): new Open vSwitch Bridge device (/org/freedesktop/NetworkManager/Devices/18)
Dec 09 16:13:10 compute-0 NetworkManager[49021]: <info>  [1765296790.9081] device (br-int)[Open vSwitch Interface]: state change: unavailable -> disconnected (reason 'none', managed-type: 'full')
Dec 09 16:13:10 compute-0 NetworkManager[49021]: <info>  [1765296790.9406] manager: (ovn-82cd4f-0): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/19)
Dec 09 16:13:10 compute-0 NetworkManager[49021]: <info>  [1765296790.9586] device (genev_sys_6081): carrier: link connected
Dec 09 16:13:10 compute-0 NetworkManager[49021]: <info>  [1765296790.9589] manager: (genev_sys_6081): new Generic device (/org/freedesktop/NetworkManager/Devices/20)

○ nfs-idmapd.service - NFSv4 ID-name mapping service
     Loaded: loaded (/usr/lib/systemd/system/nfs-idmapd.service; static)
     Active: inactive (dead)
       Docs: man:idmapd(8)

○ nfs-mountd.service - NFS Mount Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfs-mountd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.mountd(8)

○ nfs-server.service - NFS server and services
     Loaded: loaded (/usr/lib/systemd/system/nfs-server.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:rpc.nfsd(8)
             man:exportfs(8)

○ nfs-utils.service - NFS server and client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-utils.service; static)
     Active: inactive (dead)

○ nfsdcld.service - NFSv4 Client Tracking Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfsdcld.service; static)
     Active: inactive (dead)
       Docs: man:nfsdcld(8)

● nftables.service - Netfilter Tables
     Loaded: loaded (/usr/lib/systemd/system/nftables.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-12-09 16:00:42 UTC; 44min ago
       Docs: man:nft(8)
   Main PID: 63227 (code=exited, status=0/SUCCESS)
        CPU: 21ms

Dec 09 16:00:42 compute-0 systemd[1]: Starting Netfilter Tables...
Dec 09 16:00:42 compute-0 systemd[1]: Finished Netfilter Tables.

● nis-domainname.service - Read and set NIS domainname from /etc/sysconfig/network
     Loaded: loaded (/usr/lib/systemd/system/nis-domainname.service; enabled; preset: enabled)
     Active: active (exited) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
   Main PID: 674 (code=exited, status=0/SUCCESS)
        CPU: 4ms

Dec 09 14:51:37 localhost systemd[1]: Finished Read and set NIS domainname from /etc/sysconfig/network.

○ nvmefc-boot-connections.service - Auto-connect to subsystems on FC-NVME devices found during boot
     Loaded: loaded (/usr/lib/systemd/system/nvmefc-boot-connections.service; enabled; preset: enabled)
     Active: inactive (dead)

● openvswitch.service - Open vSwitch
     Loaded: loaded (/usr/lib/systemd/system/openvswitch.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-12-09 15:58:28 UTC; 46min ago
   Main PID: 47334 (code=exited, status=0/SUCCESS)
        CPU: 2ms

Dec 09 15:58:28 compute-0 systemd[1]: Starting Open vSwitch...
Dec 09 15:58:28 compute-0 systemd[1]: Finished Open vSwitch.

● ovs-delete-transient-ports.service - Open vSwitch Delete Transient Ports
     Loaded: loaded (/usr/lib/systemd/system/ovs-delete-transient-ports.service; static)
     Active: active (exited) since Tue 2025-12-09 15:58:28 UTC; 46min ago
   Main PID: 47271 (code=exited, status=0/SUCCESS)
        CPU: 33ms

Dec 09 15:58:28 compute-0 systemd[1]: Starting Open vSwitch Delete Transient Ports...
Dec 09 15:58:28 compute-0 systemd[1]: Finished Open vSwitch Delete Transient Ports.

● ovs-vswitchd.service - Open vSwitch Forwarding Unit
Unit pacemaker.service could not be found.
Unit plymouth-quit-wait.service could not be found.
Unit plymouth-start.service could not be found.
     Loaded: loaded (/usr/lib/systemd/system/ovs-vswitchd.service; static)
     Active: active (running) since Tue 2025-12-09 15:58:28 UTC; 46min ago
   Main PID: 47325 (ovs-vswitchd)
         IO: 3.4M read, 24.0K written
      Tasks: 13 (limit: 48628)
     Memory: 242.9M (peak: 248.9M)
        CPU: 6.957s
     CGroup: /system.slice/ovs-vswitchd.service
             └─47325 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach

Dec 09 15:58:28 compute-0 systemd[1]: Starting Open vSwitch Forwarding Unit...
Dec 09 15:58:28 compute-0 ovs-ctl[47315]: Inserting openvswitch module [  OK  ]
Dec 09 15:58:28 compute-0 ovs-ctl[47284]: Starting ovs-vswitchd [  OK  ]
Dec 09 15:58:28 compute-0 ovs-vsctl[47332]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0
Dec 09 15:58:28 compute-0 ovs-ctl[47284]: Enabling remote OVSDB managers [  OK  ]
Dec 09 15:58:28 compute-0 systemd[1]: Started Open vSwitch Forwarding Unit.

● ovsdb-server.service - Open vSwitch Database Unit
     Loaded: loaded (/usr/lib/systemd/system/ovsdb-server.service; static)
     Active: active (running) since Tue 2025-12-09 15:58:28 UTC; 46min ago
   Main PID: 47243 (ovsdb-server)
         IO: 1.2M read, 105.0K written
      Tasks: 1 (limit: 48628)
     Memory: 4.6M (peak: 39.9M)
        CPU: 9.627s
     CGroup: /system.slice/ovsdb-server.service
             └─47243 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach

Dec 09 15:58:27 compute-0 chown[47190]: /usr/bin/chown: cannot access '/run/openvswitch': No such file or directory
Dec 09 15:58:27 compute-0 ovs-ctl[47195]: /etc/openvswitch/conf.db does not exist ... (warning).
Dec 09 15:58:27 compute-0 ovs-ctl[47195]: Creating empty database /etc/openvswitch/conf.db [  OK  ]
Dec 09 15:58:27 compute-0 ovs-ctl[47195]: Starting ovsdb-server [  OK  ]
Dec 09 15:58:27 compute-0 ovs-vsctl[47244]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait -- init -- set Open_vSwitch . db-version=8.5.1
Dec 09 15:58:28 compute-0 ovs-vsctl[47264]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait set Open_vSwitch . ovs-version=3.3.5-115.el9s "external-ids:system-id=\"037f0e18-4bfd-4487-a7a8-05ae973391a9\"" "external-ids:rundir=\"/var/run/openvswitch\"" "system-type=\"centos\"" "system-version=\"9\""
Dec 09 15:58:28 compute-0 ovs-ctl[47195]: Configuring Open vSwitch system IDs [  OK  ]
Dec 09 15:58:28 compute-0 ovs-ctl[47195]: Enabling remote OVSDB managers [  OK  ]
Dec 09 15:58:28 compute-0 systemd[1]: Started Open vSwitch Database Unit.
Dec 09 15:58:28 compute-0 ovs-vsctl[47270]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0

○ podman.service - Podman API Service
     Loaded: loaded (/usr/lib/systemd/system/podman.service; disabled; preset: disabled)
     Active: inactive (dead) since Tue 2025-12-09 16:37:23 UTC; 7min ago
   Duration: 15.105s
TriggeredBy: ● podman.socket
       Docs: man:podman-system-service(1)
    Process: 257048 ExecStart=/usr/bin/podman $LOGGING system service (code=exited, status=0/SUCCESS)
   Main PID: 257048 (code=exited, status=0/SUCCESS)
        CPU: 178ms

Dec 09 16:37:08 compute-0 podman[257048]: time="2025-12-09T16:37:08Z" level=info msg="/usr/bin/podman filtering at log level info"
Dec 09 16:37:08 compute-0 podman[257048]: time="2025-12-09T16:37:08Z" level=info msg="Setting parallel job count to 25"
Dec 09 16:37:08 compute-0 podman[257048]: time="2025-12-09T16:37:08Z" level=info msg="Using sqlite as database backend"
Unit power-profiles-daemon.service could not be found.
Dec 09 16:37:08 compute-0 podman[257048]: time="2025-12-09T16:37:08Z" level=info msg="Not using native diff for overlay, this may cause degraded performance for building images: kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled"
Dec 09 16:37:08 compute-0 podman[257048]: time="2025-12-09T16:37:08Z" level=info msg="Using systemd socket activation to determine API endpoint"
Dec 09 16:37:08 compute-0 podman[257048]: time="2025-12-09T16:37:08Z" level=info msg="API service listening on \"/run/podman/podman.sock\". URI: \"unix:///run/podman/podman.sock\""
Dec 09 16:37:08 compute-0 podman[257048]: @ - - [09/Dec/2025:16:37:08 +0000] "HEAD /v4.7.0/libpod/_ping HTTP/1.1" 200 0 "" "PodmanPy/4.7.0 (API v4.7.0; Compatible v1.40)"
Dec 09 16:37:08 compute-0 podman[257048]: @ - - [09/Dec/2025:16:37:08 +0000] "GET /v4.7.0/libpod/containers/json HTTP/1.1" 200 25040 "" "PodmanPy/4.7.0 (API v4.7.0; Compatible v1.40)"
Dec 09 16:37:23 compute-0 podman[257048]: time="2025-12-09T16:37:23Z" level=info msg="Received shutdown.Stop(), terminating!" PID=257048
Dec 09 16:37:23 compute-0 systemd[1]: podman.service: Deactivated successfully.

● polkit.service - Authorization Manager
     Loaded: loaded (/usr/lib/systemd/system/polkit.service; static)
     Active: active (running) since Tue 2025-12-09 15:57:46 UTC; 47min ago
       Docs: man:polkit(8)
   Main PID: 43504 (polkitd)
         IO: 11.0M read, 0B written
      Tasks: 12 (limit: 48628)
     Memory: 16.8M (peak: 18.2M)
        CPU: 1.701s
     CGroup: /system.slice/polkit.service
             └─43504 /usr/lib/polkit-1/polkitd --no-debug

Dec 09 16:16:31 compute-0 polkitd[43504]: Collecting garbage unconditionally...
Dec 09 16:16:31 compute-0 polkitd[43504]: Loading rules from directory /etc/polkit-1/rules.d
Dec 09 16:16:31 compute-0 polkitd[43504]: Loading rules from directory /usr/share/polkit-1/rules.d
Dec 09 16:16:31 compute-0 polkitd[43504]: Finished loading, compiling and executing 3 rules
Dec 09 16:18:13 compute-0 polkitd[43504]: Registered Authentication Agent for unix-process:207653:520082 (system bus name :1.2549 [pkttyagent --process 207653 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Dec 09 16:18:13 compute-0 polkitd[43504]: Unregistered Authentication Agent for unix-process:207653:520082 (system bus name :1.2549, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Dec 09 16:18:13 compute-0 polkitd[43504]: Registered Authentication Agent for unix-process:207652:520081 (system bus name :1.2550 [pkttyagent --process 207652 --notify-fd 5 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Dec 09 16:18:13 compute-0 polkitd[43504]: Unregistered Authentication Agent for unix-process:207652:520081 (system bus name :1.2550, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Dec 09 16:18:15 compute-0 polkitd[43504]: Registered Authentication Agent for unix-process:208119:520329 (system bus name :1.2553 [pkttyagent --process 208119 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Dec 09 16:18:15 compute-0 polkitd[43504]: Unregistered Authentication Agent for unix-process:208119:520329 (system bus name :1.2553, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)

○ raid-check.service - RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.service; static)
     Active: inactive (dead)
TriggeredBy: ○ raid-check.timer

○ rc-local.service - /etc/rc.d/rc.local Compatibility
     Loaded: loaded (/usr/lib/systemd/system/rc-local.service; static)
     Active: inactive (dead)
       Docs: man:systemd-rc-local-generator(8)

○ rescue.service - Rescue Shell
     Loaded: loaded (/usr/lib/systemd/system/rescue.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

○ rpc-gssd.service - RPC security service for NFS client and server
Unit rpc-svcgssd.service could not be found.
     Loaded: loaded (/usr/lib/systemd/system/rpc-gssd.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-09 14:51:38 UTC; 1h 53min ago
       Docs: man:rpc.gssd(8)

Dec 09 14:51:38 np0005552052.novalocal systemd[1]: RPC security service for NFS client and server was skipped because of an unmet condition check (ConditionPathExists=/etc/krb5.keytab).

● rpc-statd-notify.service - Notify NFS peers of a restart
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd-notify.service; static)
     Active: active (exited) since Tue 2025-12-09 14:51:40 UTC; 1h 53min ago
       Docs: man:sm-notify(8)
             man:rpc.statd(8)
        CPU: 7ms

Dec 09 14:51:40 np0005552052.novalocal systemd[1]: Starting Notify NFS peers of a restart...
Dec 09 14:51:40 np0005552052.novalocal sm-notify[1003]: Version 2.5.4 starting
Dec 09 14:51:40 np0005552052.novalocal systemd[1]: Started Notify NFS peers of a restart.

○ rpc-statd.service - NFS status monitor for NFSv2/3 locking.
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.statd(8)

● rpcbind.service - RPC Bind
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
TriggeredBy: ● rpcbind.socket
       Docs: man:rpcbind(8)
   Main PID: 698 (rpcbind)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48628)
     Memory: 2.3M (peak: 2.6M)
        CPU: 36ms
     CGroup: /system.slice/rpcbind.service
             └─698 /usr/bin/rpcbind -w -f

Dec 09 14:51:37 localhost systemd[1]: Starting RPC Bind...
Dec 09 14:51:37 localhost systemd[1]: Started RPC Bind.

● rsyslog.service - System Logging Service
     Loaded: loaded (/usr/lib/systemd/system/rsyslog.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-09 14:51:40 UTC; 1h 53min ago
       Docs: man:rsyslogd(8)
             https://www.rsyslog.com/doc/
   Main PID: 1004 (rsyslogd)
         IO: 4.0K read, 15.0M written
      Tasks: 3 (limit: 48628)
     Memory: 13.5M (peak: 14.0M)
        CPU: 9.808s
     CGroup: /system.slice/rsyslog.service
             └─1004 /usr/sbin/rsyslogd -n

Dec 09 16:14:55 compute-0 rsyslogd[1004]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Dec 09 16:21:04 compute-0 rsyslogd[1004]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Dec 09 16:21:04 compute-0 rsyslogd[1004]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Dec 09 16:21:47 compute-0 rsyslogd[1004]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Dec 09 16:21:47 compute-0 rsyslogd[1004]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Dec 09 16:30:41 compute-0 rsyslogd[1004]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Dec 09 16:44:26 compute-0 rsyslogd[1004]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Dec 09 16:44:31 compute-0 rsyslogd[1004]: imjournal from <np0005552052:ceph-osd>: begin to drop messages due to rate-limiting
Dec 09 16:44:35 compute-0 rsyslogd[1004]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Dec 09 16:44:35 compute-0 rsyslogd[1004]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]

○ selinux-autorelabel-mark.service - Mark the need to relabel after reboot
     Loaded: loaded (/usr/lib/systemd/system/selinux-autorelabel-mark.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-09 14:51:37 UTC; 1h 53min ago

Dec 09 14:51:37 localhost systemd[1]: Mark the need to relabel after reboot was skipped because of an unmet condition check (ConditionSecurity=!selinux).

Unit sntp.service could not be found.
Unit sshd-keygen.service could not be found.
● serial-getty@ttyS0.service - Serial Getty on ttyS0
     Loaded: loaded (/usr/lib/systemd/system/serial-getty@.service; enabled-runtime; preset: disabled)
     Active: active (running) since Tue 2025-12-09 14:51:40 UTC; 1h 53min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1010 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48628)
     Memory: 300.0K (peak: 540.0K)
        CPU: 12ms
     CGroup: /system.slice/system-serial\x2dgetty.slice/serial-getty@ttyS0.service
             └─1010 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

Dec 09 14:51:40 np0005552052.novalocal systemd[1]: Started Serial Getty on ttyS0.

○ sshd-keygen@ecdsa.service - OpenSSH ecdsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-09 16:16:36 UTC; 28min ago

Dec 09 14:51:37 localhost systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Dec 09 16:16:36 compute-0 systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@ed25519.service - OpenSSH ed25519 Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-09 16:16:36 UTC; 28min ago

Dec 09 14:51:37 localhost systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Dec 09 16:16:36 compute-0 systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@rsa.service - OpenSSH rsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-09 16:16:36 UTC; 28min ago

Dec 09 14:51:37 localhost systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Dec 09 16:16:36 compute-0 systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

● sshd.service - OpenSSH server daemon
     Loaded: loaded (/usr/lib/systemd/system/sshd.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-09 16:16:36 UTC; 28min ago
       Docs: man:sshd(8)
             man:sshd_config(5)
   Main PID: 181036 (sshd)
         IO: 0B read, 124.0K written
      Tasks: 1 (limit: 48628)
     Memory: 3.4M (peak: 9.2M)
        CPU: 2.161s
     CGroup: /system.slice/sshd.service
             └─181036 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"

Dec 09 16:42:13 compute-0 sshd-session[261343]: Invalid user oracle from 146.190.31.45 port 55534
Dec 09 16:42:13 compute-0 sshd-session[261343]: Connection closed by invalid user oracle 146.190.31.45 port 55534 [preauth]
Unit syslog.service could not be found.
Dec 09 16:42:55 compute-0 sshd-session[262067]: Invalid user oracle from 146.190.31.45 port 35718
Dec 09 16:42:55 compute-0 sshd-session[262067]: Connection closed by invalid user oracle 146.190.31.45 port 35718 [preauth]
Dec 09 16:43:36 compute-0 sshd-session[262182]: Invalid user oracle from 146.190.31.45 port 50880
Dec 09 16:43:36 compute-0 sshd-session[262182]: Connection closed by invalid user oracle 146.190.31.45 port 50880 [preauth]
Dec 09 16:44:09 compute-0 sshd-session[262891]: Accepted publickey for zuul from 192.168.122.10 port 39876 ssh2: ECDSA SHA256:5Z3PQlHQTNxVypj+2lSB7x2k5BQcImpWN0ATZCDqSSQ
Dec 09 16:44:09 compute-0 sshd-session[262891]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Dec 09 16:44:16 compute-0 sshd-session[263230]: Invalid user oracle from 146.190.31.45 port 40842
Dec 09 16:44:16 compute-0 sshd-session[263230]: Connection closed by invalid user oracle 146.190.31.45 port 40842 [preauth]

○ sssd-kcm.service - SSSD Kerberos Cache Manager
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.service; indirect; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ● sssd-kcm.socket
       Docs: man:sssd-kcm(5)

○ sssd.service - System Security Services Daemon
     Loaded: loaded (/usr/lib/systemd/system/sssd.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-09 14:51:37 UTC; 1h 53min ago

Dec 09 14:51:37 localhost systemd[1]: System Security Services Daemon was skipped because no trigger condition checks were met.

○ sysstat-collect.service - system activity accounting tool
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-collect.timer
       Docs: man:sa1(8)

○ sysstat-summary.service - Generate a daily summary of process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-summary.timer
       Docs: man:sa2(8)

○ sysstat.service - Resets System Activity Logs
     Loaded: loaded (/usr/lib/systemd/system/sysstat.service; enabled; preset: enabled)
     Active: inactive (dead)

○ systemd-ask-password-console.service - Dispatch Password Requests to Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-console.path
       Docs: man:systemd-ask-password-console.service(8)

○ systemd-ask-password-wall.service - Forward Password Requests to Wall
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-wall.path
       Docs: man:systemd-ask-password-wall.service(8)

○ systemd-binfmt.service - Set Up Additional Binary Formats
     Loaded: loaded (/usr/lib/systemd/system/systemd-binfmt.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:systemd-binfmt.service(8)
             man:binfmt.d(5)
             https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Dec 09 14:51:37 localhost systemd[1]: Set Up Additional Binary Formats was skipped because no trigger condition checks were met.

○ systemd-boot-random-seed.service - Update Boot Loader Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-random-seed.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:systemd-boot-random-seed.service(8)
             man:random(4)

Dec 09 14:51:37 localhost systemd[1]: Update Boot Loader Random Seed was skipped because no trigger condition checks were met.

● systemd-boot-update.service - Automatic Boot Loader Update
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-update.service; enabled; preset: enabled)
     Active: active (exited) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:bootctl(1)
   Main PID: 694 (code=exited, status=0/SUCCESS)
        CPU: 15ms

Dec 09 14:51:37 localhost systemd[1]: Starting Automatic Boot Loader Update...
Dec 09 14:51:37 localhost bootctl[694]: Couldn't find EFI system partition, skipping.
Dec 09 14:51:37 localhost systemd[1]: Finished Automatic Boot Loader Update.

○ systemd-firstboot.service - First Boot Wizard
     Loaded: loaded (/usr/lib/systemd/system/systemd-firstboot.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:systemd-firstboot(1)

Dec 09 14:51:37 localhost systemd[1]: First Boot Wizard was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ systemd-fsck-root.service - File System Check on Root Device
     Loaded: loaded (/usr/lib/systemd/system/systemd-fsck-root.service; static)
     Active: inactive (dead) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
   Duration: 1.459s
       Docs: man:systemd-fsck-root.service(8)
   Main PID: 551 (code=exited, status=0/SUCCESS)
        CPU: 21ms

Dec 09 14:51:35 localhost systemd[1]: Starting File System Check on /dev/disk/by-uuid/fcf6b761-831a-48a7-9f5f-068b5063763f...
Dec 09 14:51:35 localhost systemd-fsck[553]: /usr/sbin/fsck.xfs: XFS file system.
Dec 09 14:51:35 localhost systemd[1]: Finished File System Check on /dev/disk/by-uuid/fcf6b761-831a-48a7-9f5f-068b5063763f.

● systemd-hostnamed.service - Hostname Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-hostnamed.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-hostnamed.service.d
             └─disable-privatedevices.conf
     Active: active (running) since Tue 2025-12-09 16:44:38 UTC; 17s ago
       Docs: man:systemd-hostnamed.service(8)
             man:hostname(5)
             man:machine-info(5)
             man:org.freedesktop.resolve1(5)
   Main PID: 265996 (systemd-hostnam)
         IO: 8.0K read, 0B written
      Tasks: 1 (limit: 48628)
     Memory: 2.7M (peak: 3.8M)
        CPU: 120ms
     CGroup: /system.slice/systemd-hostnamed.service
             └─265996 /usr/lib/systemd/systemd-hostnamed

Dec 09 16:44:38 compute-0 systemd[1]: Starting Hostname Service...
Dec 09 16:44:38 compute-0 systemd[1]: Started Hostname Service.

● systemd-hwdb-update.service - Rebuild Hardware Database
     Loaded: loaded (/usr/lib/systemd/system/systemd-hwdb-update.service; static)
     Active: active (exited) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:hwdb(7)
             man:systemd-hwdb(8)
   Main PID: 684 (code=exited, status=0/SUCCESS)
        CPU: 561ms

Dec 09 14:51:37 localhost systemd[1]: Starting Rebuild Hardware Database...
Dec 09 14:51:37 localhost systemd[1]: Finished Rebuild Hardware Database.

○ systemd-initctl.service - initctl Compatibility Daemon
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-initctl.socket
       Docs: man:systemd-initctl.service(8)

● systemd-journal-catalog-update.service - Rebuild Journal Catalog
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-catalog-update.service; static)
     Active: active (exited) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 699 (code=exited, status=0/SUCCESS)
        CPU: 18ms

Dec 09 14:51:37 localhost systemd[1]: Starting Rebuild Journal Catalog...
Dec 09 14:51:37 localhost systemd[1]: Finished Rebuild Journal Catalog.

● systemd-journal-flush.service - Flush Journal to Persistent Storage
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-flush.service; static)
     Active: active (exited) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 685 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Dec 09 14:51:37 localhost systemd[1]: Starting Flush Journal to Persistent Storage...
Dec 09 14:51:37 localhost systemd[1]: Finished Flush Journal to Persistent Storage.

● systemd-journald.service - Journal Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.service; static)
     Active: active (running) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
TriggeredBy: ● systemd-journald-dev-log.socket
             ● systemd-journald.socket
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 675 (systemd-journal)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48628)
     Memory: 66.6M (peak: 74.0M)
        CPU: 10.929s
     CGroup: /system.slice/systemd-journald.service
             └─675 /usr/lib/systemd/systemd-journald

Dec 09 14:51:37 localhost systemd-journald[675]: Journal started
Dec 09 14:51:37 localhost systemd-journald[675]: Runtime Journal (/run/log/journal/4d4ef2323cc3337bbfd9081b2a323b4e) is 8.0M, max 153.6M, 145.6M free.
Dec 09 14:51:36 localhost systemd[1]: systemd-journald.service: Deactivated successfully.
Dec 09 14:51:37 localhost systemd-journald[675]: Runtime Journal (/run/log/journal/4d4ef2323cc3337bbfd9081b2a323b4e) is 8.0M, max 153.6M, 145.6M free.
Dec 09 14:51:37 localhost systemd-journald[675]: Received client request to flush runtime journal.

● systemd-logind.service - User Login Management
     Loaded: loaded (/usr/lib/systemd/system/systemd-logind.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-logind.service.d
             └─10-grub2-logind-service.conf
     Active: active (running) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:sd-login(3)
             man:systemd-logind.service(8)
             man:logind.conf(5)
             man:org.freedesktop.login1(5)
   Main PID: 786 (systemd-logind)
     Status: "Processing requests..."
         IO: 4.0K read, 0B written
      Tasks: 1 (limit: 48628)
     Memory: 5.7M (peak: 8.9M)
        CPU: 3.784s
     CGroup: /system.slice/systemd-logind.service
             └─786 /usr/lib/systemd/systemd-logind

Dec 09 16:36:59 compute-0 systemd-logind[786]: New session 53 of user zuul.
Dec 09 16:37:06 compute-0 systemd-logind[786]: New session 54 of user zuul.
Dec 09 16:37:08 compute-0 systemd-logind[786]: New session 55 of user zuul.
Dec 09 16:37:35 compute-0 systemd-logind[786]: Session 54 logged out. Waiting for processes to exit.
Dec 09 16:37:35 compute-0 systemd-logind[786]: Removed session 54.
Dec 09 16:37:35 compute-0 systemd-logind[786]: Session 53 logged out. Waiting for processes to exit.
Dec 09 16:37:35 compute-0 systemd-logind[786]: Removed session 53.
Dec 09 16:37:36 compute-0 systemd-logind[786]: Session 55 logged out. Waiting for processes to exit.
Dec 09 16:37:36 compute-0 systemd-logind[786]: Removed session 55.
Dec 09 16:44:09 compute-0 systemd-logind[786]: New session 56 of user zuul.

○ systemd-machine-id-commit.service - Commit a transient machine-id on disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-machine-id-commit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:systemd-machine-id-commit.service(8)

Dec 09 14:51:37 localhost systemd[1]: Commit a transient machine-id on disk was skipped because of an unmet condition check (ConditionPathIsMountPoint=/etc/machine-id).

● systemd-machined.service - Virtual Machine and Container Registration Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-machined.service; static)
     Active: active (running) since Tue 2025-12-09 16:18:06 UTC; 26min ago
       Docs: man:systemd-machined.service(8)
             man:org.freedesktop.machine1(5)
   Main PID: 206383 (systemd-machine)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48628)
     Memory: 1.4M (peak: 1.9M)
        CPU: 1.216s
     CGroup: /system.slice/systemd-machined.service
             └─206383 /usr/lib/systemd/systemd-machined

Dec 09 16:18:06 compute-0 systemd[1]: Starting Virtual Machine and Container Registration Service...
Dec 09 16:18:06 compute-0 systemd[1]: Started Virtual Machine and Container Registration Service.
Unit systemd-networkd-wait-online.service could not be found.

● systemd-modules-load.service - Load Kernel Modules
     Loaded: loaded (/usr/lib/systemd/system/systemd-modules-load.service; static)
     Active: active (exited) since Tue 2025-12-09 16:20:09 UTC; 24min ago
       Docs: man:systemd-modules-load.service(8)
             man:modules-load.d(5)
   Main PID: 226833 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Dec 09 16:20:09 compute-0 systemd[1]: Starting Load Kernel Modules...
Dec 09 16:20:09 compute-0 systemd[1]: Finished Load Kernel Modules.

● systemd-network-generator.service - Generate network units from Kernel command line
     Loaded: loaded (/usr/lib/systemd/system/systemd-network-generator.service; enabled; preset: enabled)
     Active: active (exited) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:systemd-network-generator.service(8)
   Main PID: 676 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Dec 09 14:51:37 localhost systemd[1]: Finished Generate network units from Kernel command line.

○ systemd-pcrmachine.service - TPM2 PCR Machine ID Measurement
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrmachine.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
       Docs: man:systemd-pcrmachine.service(8)

○ systemd-pcrphase-initrd.service - TPM2 PCR Barrier (initrd)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-initrd.service; static)
     Active: inactive (dead)
       Docs: man:systemd-pcrphase-initrd.service(8)

○ systemd-pcrphase-sysinit.service - TPM2 PCR Barrier (Initialization)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-sysinit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:systemd-pcrphase-sysinit.service(8)

Dec 09 14:51:37 localhost systemd[1]: TPM2 PCR Barrier (Initialization) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pcrphase.service - TPM2 PCR Barrier (User)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-09 14:51:38 UTC; 1h 53min ago
       Docs: man:systemd-pcrphase.service(8)

Dec 09 14:51:38 np0005552052.novalocal systemd[1]: TPM2 PCR Barrier (User) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pstore.service - Platform Persistent Storage Archival
     Loaded: loaded (/usr/lib/systemd/system/systemd-pstore.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:systemd-pstore(8)

Dec 09 14:51:37 localhost systemd[1]: Platform Persistent Storage Archival was skipped because of an unmet condition check (ConditionDirectoryNotEmpty=/sys/fs/pstore).

● systemd-random-seed.service - Load/Save OS Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-random-seed.service; static)
     Active: active (exited) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:systemd-random-seed.service(8)
             man:random(4)
   Main PID: 686 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Dec 09 14:51:37 localhost systemd[1]: Starting Load/Save OS Random Seed...
Dec 09 14:51:37 localhost systemd[1]: Finished Load/Save OS Random Seed.

● systemd-remount-fs.service - Remount Root and Kernel File Systems
     Loaded: loaded (/usr/lib/systemd/system/systemd-remount-fs.service; enabled-runtime; preset: disabled)
     Active: active (exited) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:systemd-remount-fs.service(8)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
   Main PID: 677 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Dec 09 14:51:37 localhost systemd[1]: Finished Remount Root and Kernel File Systems.
Unit systemd-timesyncd.service could not be found.
Unit systemd-tmpfiles.service could not be found.

○ systemd-repart.service - Repartition Root Disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-repart.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:systemd-repart.service(8)

○ systemd-rfkill.service - Load/Save RF Kill Switch Status
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-rfkill.socket
       Docs: man:systemd-rfkill.service(8)

● systemd-sysctl.service - Apply Kernel Variables
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysctl.service; static)
     Active: active (exited) since Tue 2025-12-09 15:57:56 UTC; 46min ago
       Docs: man:systemd-sysctl.service(8)
             man:sysctl.d(5)
   Main PID: 44990 (code=exited, status=0/SUCCESS)
        CPU: 15ms

Dec 09 15:57:56 compute-0 systemd[1]: Starting Apply Kernel Variables...
Dec 09 15:57:56 compute-0 systemd[1]: Finished Apply Kernel Variables.

○ systemd-sysext.service - Merge System Extension Images into /usr/ and /opt/
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysext.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd-sysext.service(8)

● systemd-sysusers.service - Create System Users
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysusers.service; static)
     Active: active (exited) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:sysusers.d(5)
             man:systemd-sysusers.service(8)
   Main PID: 687 (code=exited, status=0/SUCCESS)
        CPU: 27ms

Dec 09 14:51:37 localhost systemd[1]: Starting Create System Users...
Dec 09 14:51:37 localhost systemd[1]: Finished Create System Users.

○ systemd-tmpfiles-clean.service - Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.service; static)
     Active: inactive (dead) since Tue 2025-12-09 15:06:46 UTC; 1h 38min ago
TriggeredBy: ● systemd-tmpfiles-clean.timer
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 4315 (code=exited, status=0/SUCCESS)
        CPU: 39ms

Dec 09 15:06:46 np0005552052.novalocal systemd[1]: Starting Cleanup of Temporary Directories...
Dec 09 15:06:46 np0005552052.novalocal systemd[1]: systemd-tmpfiles-clean.service: Deactivated successfully.
Dec 09 15:06:46 np0005552052.novalocal systemd[1]: Finished Cleanup of Temporary Directories.

● systemd-tmpfiles-setup-dev.service - Create Static Device Nodes in /dev
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup-dev.service; static)
     Active: active (exited) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 689 (code=exited, status=0/SUCCESS)
        CPU: 37ms

Dec 09 14:51:37 localhost systemd[1]: Starting Create Static Device Nodes in /dev...
Dec 09 14:51:37 localhost systemd[1]: Finished Create Static Device Nodes in /dev.

● systemd-tmpfiles-setup.service - Create Volatile Files and Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup.service; static)
     Active: active (exited) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 695 (code=exited, status=0/SUCCESS)
        CPU: 99ms

Dec 09 14:51:37 localhost systemd[1]: Starting Create Volatile Files and Directories...
Dec 09 14:51:37 localhost systemd[1]: Finished Create Volatile Files and Directories.

○ systemd-udev-settle.service - Wait for udev To Complete Device Initialization
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-settle.service; static)
     Active: inactive (dead)
       Docs: man:systemd-udev-settle.service(8)

● systemd-udev-trigger.service - Coldplug All udev Devices
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-trigger.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-udev-trigger.service.d
             └─systemd-udev-trigger-no-reload.conf
     Active: active (exited) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:udev(7)
             man:systemd-udevd.service(8)
   Main PID: 681 (code=exited, status=0/SUCCESS)
        CPU: 112ms

Dec 09 14:51:37 localhost systemd[1]: Finished Coldplug All udev Devices.

● systemd-udevd.service - Rule-based Manager for Device Events and Files
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd.service; static)
     Active: active (running) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
TriggeredBy: ● systemd-udevd-kernel.socket
             ● systemd-udevd-control.socket
       Docs: man:systemd-udevd.service(8)
             man:udev(7)
   Main PID: 728 (systemd-udevd)
     Status: "Processing with 32 children at max"
         IO: 191.5M read, 96.0M written
      Tasks: 1
     Memory: 50.1M (peak: 101.0M)
        CPU: 11.719s
     CGroup: /system.slice/systemd-udevd.service
             └─udev
               └─728 /usr/lib/systemd/systemd-udevd

Dec 09 16:43:53 compute-0 lvm[262782]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Dec 09 16:43:53 compute-0 lvm[262782]: VG ceph_vg0 finished
Dec 09 16:43:53 compute-0 lvm[262785]: PV /dev/loop5 online, VG ceph_vg2 is complete.
Dec 09 16:43:53 compute-0 lvm[262785]: VG ceph_vg2 finished
Dec 09 16:44:19 compute-0 lvm[263584]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Dec 09 16:44:19 compute-0 lvm[263584]: VG ceph_vg0 finished
Dec 09 16:44:19 compute-0 lvm[263601]: PV /dev/loop5 online, VG ceph_vg2 is complete.
Dec 09 16:44:19 compute-0 lvm[263601]: VG ceph_vg2 finished
Dec 09 16:44:19 compute-0 lvm[263616]: PV /dev/loop4 online, VG ceph_vg1 is complete.
Dec 09 16:44:19 compute-0 lvm[263616]: VG ceph_vg1 finished

● systemd-update-done.service - Update is Completed
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-done.service; static)
     Active: active (exited) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:systemd-update-done.service(8)
   Main PID: 730 (code=exited, status=0/SUCCESS)
        CPU: 11ms

Dec 09 14:51:37 localhost systemd[1]: Starting Update is Completed...
Dec 09 14:51:37 localhost systemd[1]: Finished Update is Completed.

○ systemd-update-utmp-runlevel.service - Record Runlevel Change in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp-runlevel.service; static)
     Active: inactive (dead) since Tue 2025-12-09 14:51:40 UTC; 1h 53min ago
       Docs: man:systemd-update-utmp-runlevel.service(8)
             man:utmp(5)
   Main PID: 1028 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Dec 09 14:51:40 np0005552052.novalocal systemd[1]: Starting Record Runlevel Change in UTMP...
Dec 09 14:51:40 np0005552052.novalocal systemd[1]: systemd-update-utmp-runlevel.service: Deactivated successfully.
Dec 09 14:51:40 np0005552052.novalocal systemd[1]: Finished Record Runlevel Change in UTMP.

● systemd-update-utmp.service - Record System Boot/Shutdown in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp.service; static)
     Active: active (exited) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:systemd-update-utmp.service(8)
             man:utmp(5)
   Main PID: 727 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Dec 09 14:51:37 localhost systemd[1]: Starting Record System Boot/Shutdown in UTMP...
Dec 09 14:51:37 localhost systemd[1]: Finished Record System Boot/Shutdown in UTMP.

● systemd-user-sessions.service - Permit User Sessions
     Loaded: loaded (/usr/lib/systemd/system/systemd-user-sessions.service; static)
     Active: active (exited) since Tue 2025-12-09 14:51:40 UTC; 1h 53min ago
       Docs: man:systemd-user-sessions.service(8)
   Main PID: 1006 (code=exited, status=0/SUCCESS)
        CPU: 11ms

Dec 09 14:51:40 np0005552052.novalocal systemd[1]: Starting Permit User Sessions...
Dec 09 14:51:40 np0005552052.novalocal systemd[1]: Finished Permit User Sessions.

○ systemd-vconsole-setup.service - Setup Virtual Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-vconsole-setup.service; static)
Unit tlp.service could not be found.
     Active: inactive (dead) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
   Duration: 1.938s
       Docs: man:systemd-vconsole-setup.service(8)
             man:vconsole.conf(5)
   Main PID: 313 (code=exited, status=0/SUCCESS)
        CPU: 188ms

Dec 09 14:51:34 localhost systemd[1]: Finished Setup Virtual Console.
Dec 09 14:51:36 localhost systemd[1]: systemd-vconsole-setup.service: Deactivated successfully.
Dec 09 14:51:36 localhost systemd[1]: Stopped Setup Virtual Console.
Notice: journal has been rotated since unit was started, output may be incomplete.

● tuned.service - Dynamic System Tuning Daemon
     Loaded: loaded (/usr/lib/systemd/system/tuned.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-09 16:08:13 UTC; 36min ago
       Docs: man:tuned(8)
             man:tuned.conf(5)
             man:tuned-adm(8)
   Main PID: 105736 (tuned)
         IO: 0B read, 0B written
      Tasks: 4 (limit: 48628)
     Memory: 13.8M (peak: 15.8M)
        CPU: 1.117s
     CGroup: /system.slice/tuned.service
             └─105736 /usr/bin/python3 -Es /usr/sbin/tuned -l -P

Dec 09 16:08:13 compute-0 systemd[1]: Starting Dynamic System Tuning Daemon...
Dec 09 16:08:13 compute-0 systemd[1]: Started Dynamic System Tuning Daemon.

○ unbound-anchor.service - update of the root trust anchor for DNSSEC validation in unbound
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.service; static)
     Active: inactive (dead)
TriggeredBy: ● unbound-anchor.timer
       Docs: man:unbound-anchor(8)

● user-runtime-dir@1000.service - User Runtime Directory /run/user/1000
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Tue 2025-12-09 15:27:58 UTC; 1h 16min ago
       Docs: man:user@.service(5)
   Main PID: 4338 (code=exited, status=0/SUCCESS)
        CPU: 15ms

Dec 09 15:27:58 np0005552052.novalocal systemd[1]: Starting User Runtime Directory /run/user/1000...
Dec 09 15:27:58 np0005552052.novalocal systemd[1]: Finished User Runtime Directory /run/user/1000.

● user-runtime-dir@42477.service - User Runtime Directory /run/user/42477
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Tue 2025-12-09 16:03:32 UTC; 41min ago
       Docs: man:user@.service(5)
   Main PID: 76650 (code=exited, status=0/SUCCESS)
        CPU: 16ms

Dec 09 16:03:32 compute-0 systemd[1]: Starting User Runtime Directory /run/user/42477...
Dec 09 16:03:32 compute-0 systemd[1]: Finished User Runtime Directory /run/user/42477.

● user@1000.service - User Manager for UID 1000
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Tue 2025-12-09 15:27:59 UTC; 1h 16min ago
       Docs: man:user@.service(5)
   Main PID: 4339 (systemd)
     Status: "Ready."
         IO: 676.0K read, 4.0K written
      Tasks: 5
     Memory: 9.3M (peak: 16.7M)
        CPU: 3.952s
     CGroup: /user.slice/user-1000.slice/user@1000.service
             ├─app.slice
             │ └─dbus-broker.service
             │   ├─12153 /usr/bin/dbus-broker-launch --scope user
             │   └─12182 dbus-broker --log 4 --controller 9 --machine-id 4d4ef2323cc3337bbfd9081b2a323b4e --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             ├─init.scope
             │ ├─4339 /usr/lib/systemd/systemd --user
             │ └─4342 "(sd-pam)"
             └─user.slice
               └─podman-pause-305cba0a.scope
                 └─12094 catatonit -P

Dec 09 15:38:04 np0005552052.novalocal dbus-broker-launch[12153]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +31: Eavesdropping is deprecated and ignored
Dec 09 15:38:04 np0005552052.novalocal dbus-broker-launch[12153]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +33: Eavesdropping is deprecated and ignored
Dec 09 15:38:04 np0005552052.novalocal systemd[4339]: Started D-Bus User Message Bus.
Dec 09 15:38:04 np0005552052.novalocal dbus-broker-lau[12153]: Ready
Dec 09 15:38:04 np0005552052.novalocal systemd[4339]: selinux: avc:  op=load_policy lsm=selinux seqno=6 res=1
Dec 09 15:38:04 np0005552052.novalocal systemd[4339]: Created slice Slice /user.
Dec 09 15:38:04 np0005552052.novalocal systemd[4339]: podman-12077.scope: unit configures an IP firewall, but not running as root.
Dec 09 15:38:04 np0005552052.novalocal systemd[4339]: (This warning is only shown for the first unit using IP firewalling.)
Dec 09 15:38:04 np0005552052.novalocal systemd[4339]: Started podman-12077.scope.
Dec 09 15:38:04 np0005552052.novalocal systemd[4339]: Started podman-pause-305cba0a.scope.

● user@42477.service - User Manager for UID 42477
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Tue 2025-12-09 16:03:32 UTC; 41min ago
       Docs: man:user@.service(5)
   Main PID: 76651 (systemd)
     Status: "Ready."
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 8.7M (peak: 10.5M)
        CPU: 2.648s
     CGroup: /user.slice/user-42477.slice/user@42477.service
             └─init.scope
               ├─76651 /usr/lib/systemd/systemd --user
               └─76654 "(sd-pam)"

Dec 09 16:03:32 compute-0 systemd[76651]: Finished Create User's Volatile Files and Directories.
Dec 09 16:03:32 compute-0 systemd[76651]: Reached target Basic System.
Dec 09 16:03:32 compute-0 systemd[76651]: Reached target Main User Target.
Dec 09 16:03:32 compute-0 systemd[76651]: Startup finished in 132ms.
Dec 09 16:03:32 compute-0 systemd[1]: Started User Manager for UID 42477.
Dec 09 16:05:46 compute-0 systemd[76651]: Starting Mark boot as successful...
Dec 09 16:05:46 compute-0 systemd[76651]: Finished Mark boot as successful.
Dec 09 16:08:33 compute-0 systemd[76651]: Created slice User Background Tasks Slice.
Dec 09 16:08:33 compute-0 systemd[76651]: Starting Cleanup of User's Temporary Files and Directories...
Dec 09 16:08:33 compute-0 systemd[76651]: Finished Cleanup of User's Temporary Files and Directories.

○ virtinterfaced.service - libvirt interface daemon
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtinterfaced-ro.socket
             ○ virtinterfaced.socket
             ○ virtinterfaced-admin.socket
       Docs: man:virtinterfaced(8)
             https://libvirt.org/

○ virtlockd.service - libvirt locking daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtlockd-admin.socket
             ● virtlockd.socket
       Docs: man:virtlockd(8)
             https://libvirt.org/

● virtlogd.service - libvirt logging daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-09 16:18:02 UTC; 26min ago
TriggeredBy: ● virtlogd.socket
             ● virtlogd-admin.socket
       Docs: man:virtlogd(8)
             https://libvirt.org/
   Main PID: 205752 (virtlogd)
         IO: 644.0K read, 0B written
      Tasks: 1 (limit: 48628)
     Memory: 3.1M (peak: 3.3M)
        CPU: 81ms
     CGroup: /system.slice/virtlogd.service
             └─205752 /usr/sbin/virtlogd

Dec 09 16:18:02 compute-0 systemd[1]: Starting libvirt logging daemon...
Dec 09 16:18:02 compute-0 systemd[1]: Started libvirt logging daemon.

○ virtnetworkd.service - libvirt network daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnetworkd-ro.socket
             ○ virtnetworkd.socket
             ○ virtnetworkd-admin.socket
       Docs: man:virtnetworkd(8)
             https://libvirt.org/

● virtnodedevd.service - libvirt nodedev daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-12-09 16:21:53 UTC; 23min ago
TriggeredBy: ● virtnodedevd-admin.socket
             ● virtnodedevd.socket
             ● virtnodedevd-ro.socket
       Docs: man:virtnodedevd(8)
             https://libvirt.org/
   Main PID: 244229 (virtnodedevd)
         IO: 1.0M read, 0B written
      Tasks: 20 (limit: 48628)
     Memory: 6.6M (peak: 7.8M)
        CPU: 1.546s
     CGroup: /system.slice/virtnodedevd.service
             └─244229 /usr/sbin/virtnodedevd --timeout 120

Dec 09 16:21:53 compute-0 systemd[1]: Starting libvirt nodedev daemon...
Dec 09 16:21:53 compute-0 systemd[1]: Started libvirt nodedev daemon.

○ virtnwfilterd.service - libvirt nwfilter daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnwfilterd-ro.socket
             ○ virtnwfilterd.socket
             ○ virtnwfilterd-admin.socket
       Docs: man:virtnwfilterd(8)
             https://libvirt.org/

○ virtproxyd.service - libvirt proxy daemon
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.service; enabled; preset: disabled)
     Active: inactive (dead) since Tue 2025-12-09 16:20:05 UTC; 24min ago
   Duration: 2min 22ms
TriggeredBy: ● virtproxyd-admin.socket
             ● virtproxyd-ro.socket
             ● virtproxyd-tls.socket
             ● virtproxyd.socket
       Docs: man:virtproxyd(8)
             https://libvirt.org/
   Main PID: 206172 (code=exited, status=0/SUCCESS)
        CPU: 53ms

Dec 09 16:18:05 compute-0 systemd[1]: Starting libvirt proxy daemon...
Dec 09 16:18:05 compute-0 systemd[1]: Started libvirt proxy daemon.
Dec 09 16:20:05 compute-0 systemd[1]: virtproxyd.service: Deactivated successfully.

● virtqemud.service - libvirt QEMU daemon
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-09 16:21:46 UTC; 23min ago
TriggeredBy: ● virtqemud.socket
             ● virtqemud-admin.socket
             ● virtqemud-ro.socket
       Docs: man:virtqemud(8)
             https://libvirt.org/
   Main PID: 243015 (virtqemud)
         IO: 4.7M read, 188.0K written
      Tasks: 18 (limit: 32768)
     Memory: 19.9M (peak: 50.8M)
        CPU: 2.086s
     CGroup: /system.slice/virtqemud.service
             └─243015 /usr/sbin/virtqemud --timeout 120

Dec 09 16:21:46 compute-0 systemd[1]: Starting libvirt QEMU daemon...
Dec 09 16:21:46 compute-0 systemd[1]: Started libvirt QEMU daemon.
Dec 09 16:21:48 compute-0 virtqemud[243015]: libvirt version: 11.9.0, package: 1.el9 (builder@centos.org, 2025-11-04-09:54:50, )
Dec 09 16:21:48 compute-0 virtqemud[243015]: hostname: compute-0
Dec 09 16:21:48 compute-0 virtqemud[243015]: End of file while reading data: Input/output error
Dec 09 16:44:18 compute-0 virtqemud[243015]: Failed to connect socket to '/var/run/libvirt/virtnetworkd-sock-ro': No such file or directory
Dec 09 16:44:18 compute-0 virtqemud[243015]: Failed to connect socket to '/var/run/libvirt/virtnwfilterd-sock-ro': No such file or directory
Dec 09 16:44:18 compute-0 virtqemud[243015]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory
Dec 09 16:44:56 compute-0 virtqemud[243015]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory

○ virtsecretd.service - libvirt secret daemon
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.service; enabled; preset: disabled)
     Active: inactive (dead) since Tue 2025-12-09 16:20:15 UTC; 24min ago
   Duration: 2min 7.992s
TriggeredBy: ● virtsecretd-ro.socket
             ● virtsecretd-admin.socket
             ● virtsecretd.socket
       Docs: man:virtsecretd(8)
             https://libvirt.org/
   Main PID: 206600 (code=exited, status=0/SUCCESS)
        CPU: 62ms

Dec 09 16:18:07 compute-0 systemd[1]: Starting libvirt secret daemon...
Dec 09 16:18:07 compute-0 systemd[1]: Started libvirt secret daemon.
Unit ypbind.service could not be found.
Unit yppasswdd.service could not be found.
Unit ypserv.service could not be found.
Unit ypxfrd.service could not be found.
Dec 09 16:20:15 compute-0 systemd[1]: virtsecretd.service: Deactivated successfully.

○ virtstoraged.service - libvirt storage daemon
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtstoraged-ro.socket
             ○ virtstoraged-admin.socket
             ○ virtstoraged.socket
       Docs: man:virtstoraged(8)
             https://libvirt.org/

● -.slice - Root Slice
     Loaded: loaded
     Active: active since Tue 2025-12-09 14:51:34 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:34 UTC; 1h 53min ago
       Docs: man:systemd.special(7)
      Tasks: 1420
     Memory: 2.7G
        CPU: 36min 32.850s
     CGroup: /
             ├─268565 turbostat --debug sleep 10
             ├─268568 sleep 10
             ├─init.scope
             │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
             ├─machine.slice
             │ ├─libpod-0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470.scope
             │ │ └─container
             │ │   ├─145423 dumb-init --single-child -- kolla_start
             │ │   └─145426 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
             │ ├─libpod-84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2.scope
             │ │ └─container
             │ │   ├─225740 dumb-init --single-child -- kolla_start
             │ │   └─225743 /usr/sbin/multipathd -d
             │ ├─libpod-9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce.scope
             │ │ └─container
             │ │   ├─243454 dumb-init --single-child -- kolla_start
             │ │   └─243461 /usr/bin/python3 /usr/bin/nova-compute
             │ ├─libpod-conmon-4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f.scope
             │ │ ├─269408 /usr/bin/conmon --api-version 1 -c 4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f -u 4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f/userdata -p /run/containers/storage/overlay-containers/4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f/userdata/pidfile -n funny_jepsen --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f/userdata/oci-log --conmon-pidfile /run/containers/storage/overlay-containers/4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f/userdata/conmon.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f
             │ │ └─269493 /usr/bin/podman --root /var/lib/containers/storage --runroot /run/containers/storage --log-level warning --cgroup-manager systemd --tmpdir /run/libpod --network-config-dir "" --network-backend netavark --volumepath /var/lib/containers/storage/volumes --db-backend sqlite --transient-store=false --hooks-dir /usr/share/containers/oci/hooks.d --runtime crun --storage-driver overlay --storage-opt overlay.mountopt=nodev,metacopy=on --events-backend journald container cleanup --stopped-only --rm 4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f
             │ └─libpod-fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692.scope
             │   └─container
             │     ├─155088 dumb-init --single-child -- kolla_start
             │     ├─155091 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │     ├─155210 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │     └─155215 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp2hzlk811/privsep.sock
             ├─system.slice
             │ ├─NetworkManager.service
             │ │ └─49021 /usr/sbin/NetworkManager --no-daemon
             │ ├─auditd.service
             │ │ ├─700 /sbin/auditd
             │ │ └─702 /usr/sbin/sedispatch
             │ ├─chronyd.service
             │ │ └─58592 /usr/sbin/chronyd -F 2
             │ ├─crond.service
             │ │ └─1008 /usr/sbin/crond -n
             │ ├─dbus-broker.service
             │ │ ├─750 /usr/bin/dbus-broker-launch --scope system --audit
             │ │ └─772 dbus-broker --log 4 --controller 9 --machine-id 4d4ef2323cc3337bbfd9081b2a323b4e --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             │ ├─edpm_multipathd.service
             │ │ └─225738 /usr/bin/conmon --api-version 1 -c 84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2 -u 84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2/userdata -p /run/containers/storage/overlay-containers/84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2/userdata/pidfile -n multipathd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2/userdata/oci-log --conmon-pidfile /run/multipathd.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2
             │ ├─edpm_nova_compute.service
             │ │ └─243452 /usr/bin/conmon --api-version 1 -c 9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce -u 9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce/userdata -p /run/containers/storage/overlay-containers/9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce
             │ ├─edpm_ovn_controller.service
             │ │ └─145421 /usr/bin/conmon --api-version 1 -c 0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470 -u 0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470/userdata -p /run/containers/storage/overlay-containers/0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470
             │ ├─edpm_ovn_metadata_agent.service
             │ │ └─155086 /usr/bin/conmon --api-version 1 -c fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692 -u fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692/userdata -p /run/containers/storage/overlay-containers/fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692
             │ ├─gssproxy.service
             │ │ └─869 /usr/sbin/gssproxy -D
             │ ├─irqbalance.service
             │ │ └─780 /usr/sbin/irqbalance
             │ ├─iscsid.service
             │ │ └─228459 /usr/sbin/iscsid -f
             │ ├─ovs-vswitchd.service
             │ │ └─47325 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             │ ├─ovsdb-server.service
             │ │ └─47243 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             │ ├─polkit.service
             │ │ └─43504 /usr/lib/polkit-1/polkitd --no-debug
             │ ├─rpcbind.service
             │ │ └─698 /usr/bin/rpcbind -w -f
             │ ├─rsyslog.service
             │ │ └─1004 /usr/sbin/rsyslogd -n
             │ ├─sshd.service
             │ │ └─181036 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             │ ├─system-ceph\x2d67f67f44\x2d54fc\x2d54ea\x2d8df0\x2d10931b6ecdaf.slice
             │ │ ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@crash.compute-0.service
             │ │ │ ├─libpod-payload-1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973
             │ │ │ │ ├─80274 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ │ └─80276 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ └─runtime
             │ │ │   └─80272 /usr/bin/conmon --api-version 1 -c 1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973 -u 1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973/userdata -p /run/containers/storage/overlay-containers/1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973
             │ │ ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mds.cephfs.compute-0.izecis.service
             │ │ │ ├─libpod-payload-63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474
             │ │ │ │ ├─95394 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.izecis -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─95396 /usr/bin/ceph-mds -n mds.cephfs.compute-0.izecis -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─95392 /usr/bin/conmon --api-version 1 -c 63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474 -u 63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474/userdata -p /run/containers/storage/overlay-containers/63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-mds-cephfs-compute-0-izecis --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mds.cephfs.compute-0.izecis.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474
             │ │ ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mgr.compute-0.ysegzv.service
             │ │ │ ├─libpod-payload-f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662
             │ │ │ │ ├─75513 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.ysegzv -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─75515 /usr/bin/ceph-mgr -n mgr.compute-0.ysegzv -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─75511 /usr/bin/conmon --api-version 1 -c f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662 -u f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662/userdata -p /run/containers/storage/overlay-containers/f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-mgr-compute-0-ysegzv --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mgr.compute-0.ysegzv.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662
             │ │ ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mon.compute-0.service
             │ │ │ ├─libpod-payload-9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6
             │ │ │ │ ├─75220 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ │ └─75222 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─75218 /usr/bin/conmon --api-version 1 -c 9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6 -u 9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6/userdata -p /run/containers/storage/overlay-containers/9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6
             │ │ ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.0.service
             │ │ │ ├─libpod-payload-012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1
             │ │ │ │ ├─86011 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ │ └─86013 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ └─runtime
             │ │ │   └─86009 /usr/bin/conmon --api-version 1 -c 012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1 -u 012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1/userdata -p /run/containers/storage/overlay-containers/012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1
             │ │ ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.1.service
             │ │ │ ├─libpod-payload-187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a
             │ │ │ │ ├─87053 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ │ └─87055 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ └─runtime
             │ │ │   └─87051 /usr/bin/conmon --api-version 1 -c 187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a -u 187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a/userdata -p /run/containers/storage/overlay-containers/187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a
             │ │ ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.2.service
             │ │ │ ├─libpod-payload-2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78
             │ │ │ │ ├─88097 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ │ └─88099 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ └─runtime
             │ │ │   └─88095 /usr/bin/conmon --api-version 1 -c 2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78 -u 2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78/userdata -p /run/containers/storage/overlay-containers/2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78
             │ │ └─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@rgw.rgw.compute-0.efuxpz.service
             │ │   ├─libpod-payload-7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d
             │ │   │ ├─94931 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.efuxpz -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
             │ │   │ └─94933 /usr/bin/radosgw -n client.rgw.rgw.compute-0.efuxpz -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
             │ │   └─runtime
             │ │     └─94929 /usr/bin/conmon --api-version 1 -c 7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d -u 7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d/userdata -p /run/containers/storage/overlay-containers/7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-rgw-rgw-compute-0-efuxpz --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@rgw.rgw.compute-0.efuxpz.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d
             │ ├─system-getty.slice
             │ │ └─getty@tty1.service
             │ │   └─1009 /sbin/agetty -o "-p -- \\u" --noclear - linux
             │ ├─system-serial\x2dgetty.slice
             │ │ └─serial-getty@ttyS0.service
             │ │   └─1010 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             │ ├─systemd-hostnamed.service
             │ │ └─265996 /usr/lib/systemd/systemd-hostnamed
             │ ├─systemd-journald.service
             │ │ └─675 /usr/lib/systemd/systemd-journald
             │ ├─systemd-logind.service
             │ │ └─786 /usr/lib/systemd/systemd-logind
             │ ├─systemd-machined.service
             │ │ └─206383 /usr/lib/systemd/systemd-machined
             │ ├─systemd-udevd.service
             │ │ └─udev
             │ │   └─728 /usr/lib/systemd/systemd-udevd
             │ ├─tuned.service
             │ │ └─105736 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             │ ├─virtlogd.service
             │ │ └─205752 /usr/sbin/virtlogd
             │ ├─virtnodedevd.service
             │ │ └─244229 /usr/sbin/virtnodedevd --timeout 120
             │ └─virtqemud.service
             │   └─243015 /usr/sbin/virtqemud --timeout 120
             └─user.slice
               ├─user-1000.slice
               │ ├─session-1.scope
               │ │ └─4551 /usr/bin/python3
               │ ├─session-56.scope
               │ │ ├─262891 "sshd-session: zuul [priv]"
               │ │ ├─262913 "sshd-session: zuul@notty"
               │ │ ├─262914 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
               │ │ ├─262938 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
               │ │ ├─268564 timeout 15s turbostat --debug sleep 10
               │ │ ├─269108 timeout 300s systemctl status --all
               │ │ ├─269111 systemctl status --all
               │ │ ├─269431 timeout 300s ceph osd dump --format json-pretty
               │ │ ├─269433 /usr/bin/python3 -s /usr/bin/ceph osd dump --format json-pretty
               │ │ ├─269491 timeout --foreground 300s virsh -r nodedev-dumpxml block_sr0_QEMU_DVD_ROM_QM00001
               │ │ └─269492 virsh -r nodedev-dumpxml block_sr0_QEMU_DVD_ROM_QM00001
               │ └─user@1000.service
               │   ├─app.slice
               │   │ └─dbus-broker.service
               │   │   ├─12153 /usr/bin/dbus-broker-launch --scope user
               │   │   └─12182 dbus-broker --log 4 --controller 9 --machine-id 4d4ef2323cc3337bbfd9081b2a323b4e --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               │   ├─init.scope
               │   │ ├─4339 /usr/lib/systemd/systemd --user
               │   │ └─4342 "(sd-pam)"
               │   └─user.slice
               │     └─podman-pause-305cba0a.scope
               │       └─12094 catatonit -P
               └─user-42477.slice
                 ├─session-21.scope
                 │ ├─76647 "sshd-session: ceph-admin [priv]"
                 │ └─76670 "sshd-session: ceph-admin"
                 ├─session-23.scope
                 │ ├─76665 "sshd-session: ceph-admin [priv]"
                 │ └─76671 "sshd-session: ceph-admin@notty"
                 ├─session-24.scope
                 │ ├─76697 "sshd-session: ceph-admin [priv]"
                 │ └─76700 "sshd-session: ceph-admin@notty"
                 ├─session-25.scope
                 │ ├─76726 "sshd-session: ceph-admin [priv]"
                 │ └─76729 "sshd-session: ceph-admin@notty"
                 ├─session-26.scope
                 │ ├─76755 "sshd-session: ceph-admin [priv]"
                 │ └─76758 "sshd-session: ceph-admin@notty"
                 ├─session-27.scope
                 │ ├─76784 "sshd-session: ceph-admin [priv]"
                 │ └─76787 "sshd-session: ceph-admin@notty"
                 ├─session-28.scope
                 │ ├─76813 "sshd-session: ceph-admin [priv]"
                 │ └─76816 "sshd-session: ceph-admin@notty"
                 ├─session-29.scope
                 │ ├─76842 "sshd-session: ceph-admin [priv]"
                 │ └─76845 "sshd-session: ceph-admin@notty"
                 ├─session-30.scope
                 │ ├─76871 "sshd-session: ceph-admin [priv]"
                 │ └─76874 "sshd-session: ceph-admin@notty"
                 ├─session-31.scope
                 │ ├─76900 "sshd-session: ceph-admin [priv]"
                 │ └─76903 "sshd-session: ceph-admin@notty"
                 ├─session-32.scope
                 │ ├─76927 "sshd-session: ceph-admin [priv]"
                 │ └─76930 "sshd-session: ceph-admin@notty"
                 ├─session-33.scope
                 │ ├─ 76956 "sshd-session: ceph-admin [priv]"
                 │ ├─ 76959 "sshd-session: ceph-admin@notty"
                 │ ├─269057 sudo /bin/python3 /var/lib/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86 --timeout 895 ceph-volume --fsid 67f67f44-54fc-54ea-8df0-10931b6ecdaf --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --objectstore bluestore --yes --no-systemd
                 │ ├─269082 /bin/python3 /var/lib/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86 --timeout 895 ceph-volume --fsid 67f67f44-54fc-54ea-8df0-10931b6ecdaf --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --objectstore bluestore --yes --no-systemd
                 │ └─269391 /bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86 -e NODE_NAME=compute-0 -e CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group -e CEPH_VOLUME_SKIP_RESTORECON=yes -e CEPH_VOLUME_DEBUG=1 -v /var/run/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf:/var/run/ceph:z -v /var/log/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf:/var/log/ceph:z -v /var/lib/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf/crash:/var/lib/ceph/crash:z -v /run/systemd/journal:/run/systemd/journal -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /var/lib/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf/selinux:/sys/fs/selinux:ro -v /:/rootfs:rslave -v /etc/hosts:/etc/hosts:ro -v /tmp/ceph-tmpfuu3d7xo:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpoufvpa0c:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86 lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --objectstore bluestore --yes --no-systemd
                 └─user@42477.service
                   └─init.scope
                     ├─76651 /usr/lib/systemd/systemd --user
                     └─76654 "(sd-pam)"

Dec 09 16:44:38 compute-0 systemd[1]: Starting Hostname Service...
Dec 09 16:44:38 compute-0 systemd[1]: Started Hostname Service.
Dec 09 16:44:55 compute-0 systemd[1]: Started libpod-conmon-6de51581d8e77a1bc4dbf7916fdfe9005c1866ceba3ee9b9582f2907611c6fda.scope.
Dec 09 16:44:55 compute-0 systemd[1]: Started libcrun container.
Dec 09 16:44:55 compute-0 systemd[1]: libpod-6de51581d8e77a1bc4dbf7916fdfe9005c1866ceba3ee9b9582f2907611c6fda.scope: Deactivated successfully.
Dec 09 16:44:55 compute-0 systemd[1]: var-lib-containers-storage-overlay-9d2474e97d2f6ca8d7311d97cac788396ef7bcc51bb073801856fcc9ff22e173-merged.mount: Deactivated successfully.
Dec 09 16:44:55 compute-0 systemd[1]: libpod-conmon-6de51581d8e77a1bc4dbf7916fdfe9005c1866ceba3ee9b9582f2907611c6fda.scope: Deactivated successfully.
Dec 09 16:44:55 compute-0 systemd[1]: Started libpod-conmon-4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f.scope.
Dec 09 16:44:55 compute-0 systemd[1]: Started libcrun container.
Dec 09 16:44:56 compute-0 systemd[1]: libpod-4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f.scope: Deactivated successfully.

● machine.slice - Virtual Machine and Container Slice
     Loaded: loaded (/usr/lib/systemd/system/machine.slice; static)
     Active: active since Tue 2025-12-09 16:02:57 UTC; 41min ago
      Until: Tue 2025-12-09 16:02:57 UTC; 41min ago
       Docs: man:systemd.special(7)
         IO: 747.9M read, 21.1M written
      Tasks: 54
     Memory: 466.8M (peak: 512.3M)
        CPU: 3min 22.552s
     CGroup: /machine.slice
             ├─libpod-0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470.scope
             │ └─container
             │   ├─145423 dumb-init --single-child -- kolla_start
             │   └─145426 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
             ├─libpod-84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2.scope
             │ └─container
             │   ├─225740 dumb-init --single-child -- kolla_start
             │   └─225743 /usr/sbin/multipathd -d
             ├─libpod-9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce.scope
             │ └─container
             │   ├─243454 dumb-init --single-child -- kolla_start
             │   └─243461 /usr/bin/python3 /usr/bin/nova-compute
             ├─libpod-conmon-4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f.scope
             │ ├─269408 /usr/bin/conmon --api-version 1 -c 4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f -u 4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f/userdata -p /run/containers/storage/overlay-containers/4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f/userdata/pidfile -n funny_jepsen --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f/userdata/oci-log --conmon-pidfile /run/containers/storage/overlay-containers/4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f/userdata/conmon.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f
             │ └─269493 /usr/bin/podman --root /var/lib/containers/storage --runroot /run/containers/storage --log-level warning --cgroup-manager systemd --tmpdir /run/libpod --network-config-dir "" --network-backend netavark --volumepath /var/lib/containers/storage/volumes --db-backend sqlite --transient-store=false --hooks-dir /usr/share/containers/oci/hooks.d --runtime crun --storage-driver overlay --storage-opt overlay.mountopt=nodev,metacopy=on --events-backend journald container cleanup --stopped-only --rm 4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f
             └─libpod-fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692.scope
               └─container
                 ├─155088 dumb-init --single-child -- kolla_start
                 ├─155091 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
                 ├─155210 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
                 └─155215 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp2hzlk811/privsep.sock

Dec 09 16:43:52 compute-0 sad_chandrasekhar[262561]:             "vg_name": "ceph_vg2"
Dec 09 16:43:52 compute-0 sad_chandrasekhar[262561]:         }
Dec 09 16:43:52 compute-0 sad_chandrasekhar[262561]:     ]
Dec 09 16:43:52 compute-0 sad_chandrasekhar[262561]: }
Dec 09 16:43:52 compute-0 festive_napier[262663]: 167 167
Dec 09 16:43:54 compute-0 ecstatic_goldstine[262704]: {}
Dec 09 16:44:55 compute-0 inspiring_pare[269163]: 167 167
Dec 09 16:44:55 compute-0 conmon[269163]: conmon 6de51581d8e77a1bc4db <nwarn>: Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-6de51581d8e77a1bc4dbf7916fdfe9005c1866ceba3ee9b9582f2907611c6fda.scope/container/memory.events
Dec 09 16:44:56 compute-0 funny_jepsen[269408]: --> passed data devices: 0 physical, 3 LVM
Dec 09 16:44:56 compute-0 funny_jepsen[269408]: --> All data devices are unavailable

● system-ceph\x2d67f67f44\x2d54fc\x2d54ea\x2d8df0\x2d10931b6ecdaf.slice - Slice /system/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf
     Loaded: loaded
     Active: active since Tue 2025-12-09 16:03:01 UTC; 41min ago
      Until: Tue 2025-12-09 16:03:01 UTC; 41min ago
         IO: 21.8M read, 4.3G written
      Tasks: 1002
     Memory: 2.1G (peak: 2.1G)
        CPU: 2min 36.698s
     CGroup: /system.slice/system-ceph\x2d67f67f44\x2d54fc\x2d54ea\x2d8df0\x2d10931b6ecdaf.slice
             ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@crash.compute-0.service
             │ ├─libpod-payload-1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973
             │ │ ├─80274 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ └─80276 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ └─runtime
             │   └─80272 /usr/bin/conmon --api-version 1 -c 1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973 -u 1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973/userdata -p /run/containers/storage/overlay-containers/1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973
             ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mds.cephfs.compute-0.izecis.service
             │ ├─libpod-payload-63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474
             │ │ ├─95394 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.izecis -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─95396 /usr/bin/ceph-mds -n mds.cephfs.compute-0.izecis -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─95392 /usr/bin/conmon --api-version 1 -c 63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474 -u 63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474/userdata -p /run/containers/storage/overlay-containers/63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-mds-cephfs-compute-0-izecis --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mds.cephfs.compute-0.izecis.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474
             ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mgr.compute-0.ysegzv.service
             │ ├─libpod-payload-f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662
             │ │ ├─75513 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.ysegzv -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─75515 /usr/bin/ceph-mgr -n mgr.compute-0.ysegzv -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─75511 /usr/bin/conmon --api-version 1 -c f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662 -u f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662/userdata -p /run/containers/storage/overlay-containers/f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-mgr-compute-0-ysegzv --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mgr.compute-0.ysegzv.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662
             ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mon.compute-0.service
             │ ├─libpod-payload-9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6
             │ │ ├─75220 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─75222 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─runtime
             │   └─75218 /usr/bin/conmon --api-version 1 -c 9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6 -u 9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6/userdata -p /run/containers/storage/overlay-containers/9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6
             ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.0.service
             │ ├─libpod-payload-012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1
             │ │ ├─86011 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ └─86013 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ └─runtime
             │   └─86009 /usr/bin/conmon --api-version 1 -c 012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1 -u 012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1/userdata -p /run/containers/storage/overlay-containers/012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1
             ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.1.service
             │ ├─libpod-payload-187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a
             │ │ ├─87053 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ └─87055 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ └─runtime
             │   └─87051 /usr/bin/conmon --api-version 1 -c 187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a -u 187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a/userdata -p /run/containers/storage/overlay-containers/187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a
             ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.2.service
             │ ├─libpod-payload-2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78
             │ │ ├─88097 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ └─88099 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ └─runtime
             │   └─88095 /usr/bin/conmon --api-version 1 -c 2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78 -u 2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78/userdata -p /run/containers/storage/overlay-containers/2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78
             └─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@rgw.rgw.compute-0.efuxpz.service
               ├─libpod-payload-7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d
               │ ├─94931 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.efuxpz -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
               │ └─94933 /usr/bin/radosgw -n client.rgw.rgw.compute-0.efuxpz -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
               └─runtime
                 └─94929 /usr/bin/conmon --api-version 1 -c 7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d -u 7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d/userdata -p /run/containers/storage/overlay-containers/7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-rgw-rgw-compute-0-efuxpz --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@rgw.rgw.compute-0.efuxpz.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d

Dec 09 16:44:55 compute-0 ceph-mon[75222]: from='mgr.14124 192.168.122.100:0/2022010261' entity='mgr.compute-0.ysegzv' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
Dec 09 16:44:55 compute-0 ceph-mon[75222]: from='mgr.14124 192.168.122.100:0/2022010261' entity='mgr.compute-0.ysegzv' cmd={"prefix": "auth get", "entity": "client.bootstrap-osd"} : dispatch
Dec 09 16:44:55 compute-0 ceph-mon[75222]: from='mgr.14124 192.168.122.100:0/2022010261' entity='mgr.compute-0.ysegzv' cmd={"prefix": "config generate-minimal-conf"} : dispatch
Dec 09 16:44:55 compute-0 ceph-mon[75222]: from='client.? 192.168.122.100:0/4175834408' entity='client.admin' cmd={"prefix": "osd blocklist ls", "format": "json-pretty"} : dispatch
Dec 09 16:44:55 compute-0 ceph-mgr[75515]: log_channel(audit) log [DBG] : from='client.14826 -' entity='client.admin' cmd=[{"prefix": "osd df", "output_method": "tree", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Dec 09 16:44:55 compute-0 ceph-mgr[75515]: log_channel(audit) log [DBG] : from='client.14828 -' entity='client.admin' cmd=[{"prefix": "osd df", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Dec 09 16:44:56 compute-0 ceph-mon[75222]: mon.compute-0@0(leader).osd e152 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 348127232 full_alloc: 348127232 kv_alloc: 318767104
Dec 09 16:44:56 compute-0 ceph-mon[75222]: from='client.14822 -' entity='client.admin' cmd=[{"prefix": "osd blocked-by", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Dec 09 16:44:56 compute-0 ceph-mon[75222]: pgmap v1387: 305 pgs: 305 active+clean; 461 KiB data, 137 MiB used, 60 GiB / 60 GiB avail
Dec 09 16:44:56 compute-0 ceph-mon[75222]: from='client.14826 -' entity='client.admin' cmd=[{"prefix": "osd df", "output_method": "tree", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch

● system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice - Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged
     Loaded: loaded
     Active: active since Tue 2025-12-09 16:18:04 UTC; 26min ago
      Until: Tue 2025-12-09 16:18:04 UTC; 26min ago
         IO: 4.0K read, 0B written
      Tasks: 0
     Memory: 4.0K (peak: 58.7M)
        CPU: 996ms
     CGroup: /system.slice/system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice

Dec 09 16:18:04 compute-0 systemd[1]: Created slice Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged.

● system-getty.slice - Slice /system/getty
     Loaded: loaded
     Active: active since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 236.0K (peak: 460.0K)
        CPU: 13ms
     CGroup: /system.slice/system-getty.slice
             └─getty@tty1.service
               └─1009 /sbin/agetty -o "-p -- \\u" --noclear - linux

● system-modprobe.slice - Slice /system/modprobe
     Loaded: loaded
     Active: active since Tue 2025-12-09 14:51:34 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:34 UTC; 1h 53min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 52.0K (peak: 11.8M)
        CPU: 161ms
     CGroup: /system.slice/system-modprobe.slice

Dec 09 14:51:34 localhost systemd[1]: Created slice Slice /system/modprobe.

● system-serial\x2dgetty.slice - Slice /system/serial-getty
     Loaded: loaded
     Active: active since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 320.0K (peak: 560.0K)
        CPU: 12ms
     CGroup: /system.slice/system-serial\x2dgetty.slice
             └─serial-getty@ttyS0.service
               └─1010 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

● system-sshd\x2dkeygen.slice - Slice /system/sshd-keygen
     Loaded: loaded
     Active: active since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 0B (peak: 0B)
        CPU: 0
     CGroup: /system.slice/system-sshd\x2dkeygen.slice

● system.slice - System Slice
     Loaded: loaded
     Active: active since Tue 2025-12-09 14:51:34 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:34 UTC; 1h 53min ago
       Docs: man:systemd.special(7)
         IO: 280.1M read, 4.5G written
      Tasks: 1107
     Memory: 2.7G (peak: 2.7G)
        CPU: 6min 46.874s
     CGroup: /system.slice
             ├─NetworkManager.service
             │ └─49021 /usr/sbin/NetworkManager --no-daemon
             ├─auditd.service
             │ ├─700 /sbin/auditd
             │ └─702 /usr/sbin/sedispatch
             ├─chronyd.service
             │ └─58592 /usr/sbin/chronyd -F 2
             ├─crond.service
             │ └─1008 /usr/sbin/crond -n
             ├─dbus-broker.service
             │ ├─750 /usr/bin/dbus-broker-launch --scope system --audit
             │ └─772 dbus-broker --log 4 --controller 9 --machine-id 4d4ef2323cc3337bbfd9081b2a323b4e --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             ├─edpm_multipathd.service
             │ └─225738 /usr/bin/conmon --api-version 1 -c 84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2 -u 84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2/userdata -p /run/containers/storage/overlay-containers/84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2/userdata/pidfile -n multipathd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2/userdata/oci-log --conmon-pidfile /run/multipathd.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2
             ├─edpm_nova_compute.service
             │ └─243452 /usr/bin/conmon --api-version 1 -c 9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce -u 9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce/userdata -p /run/containers/storage/overlay-containers/9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 9f2fa752ba80f2edb6a6ed5e7e6142147c4b695355174f47652b45554431a9ce
             ├─edpm_ovn_controller.service
             │ └─145421 /usr/bin/conmon --api-version 1 -c 0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470 -u 0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470/userdata -p /run/containers/storage/overlay-containers/0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470
             ├─edpm_ovn_metadata_agent.service
             │ └─155086 /usr/bin/conmon --api-version 1 -c fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692 -u fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692/userdata -p /run/containers/storage/overlay-containers/fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692
             ├─gssproxy.service
             │ └─869 /usr/sbin/gssproxy -D
             ├─irqbalance.service
             │ └─780 /usr/sbin/irqbalance
             ├─iscsid.service
             │ └─228459 /usr/sbin/iscsid -f
             ├─ovs-vswitchd.service
             │ └─47325 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             ├─ovsdb-server.service
             │ └─47243 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             ├─polkit.service
             │ └─43504 /usr/lib/polkit-1/polkitd --no-debug
             ├─rpcbind.service
             │ └─698 /usr/bin/rpcbind -w -f
             ├─rsyslog.service
             │ └─1004 /usr/sbin/rsyslogd -n
             ├─sshd.service
             │ └─181036 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             ├─system-ceph\x2d67f67f44\x2d54fc\x2d54ea\x2d8df0\x2d10931b6ecdaf.slice
             │ ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@crash.compute-0.service
             │ │ ├─libpod-payload-1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973
             │ │ │ ├─80274 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ └─80276 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ └─runtime
             │ │   └─80272 /usr/bin/conmon --api-version 1 -c 1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973 -u 1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973/userdata -p /run/containers/storage/overlay-containers/1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 1d6c84974bebf4aec64bc5aa950bd94610adc6953e493e81f66035a7be677973
             │ ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mds.cephfs.compute-0.izecis.service
             │ │ ├─libpod-payload-63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474
             │ │ │ ├─95394 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.izecis -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─95396 /usr/bin/ceph-mds -n mds.cephfs.compute-0.izecis -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─95392 /usr/bin/conmon --api-version 1 -c 63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474 -u 63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474/userdata -p /run/containers/storage/overlay-containers/63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-mds-cephfs-compute-0-izecis --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mds.cephfs.compute-0.izecis.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 63e30a35b7b96951b132aa123d944f19fef8cbb23554d42058a7f839e76cf474
             │ ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mgr.compute-0.ysegzv.service
             │ │ ├─libpod-payload-f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662
             │ │ │ ├─75513 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.ysegzv -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─75515 /usr/bin/ceph-mgr -n mgr.compute-0.ysegzv -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─75511 /usr/bin/conmon --api-version 1 -c f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662 -u f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662/userdata -p /run/containers/storage/overlay-containers/f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-mgr-compute-0-ysegzv --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mgr.compute-0.ysegzv.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg f232def5bd3d41fdf0b35f628fe45f0e39d35b90e0d04b3d069f81dcb3d82662
             │ ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mon.compute-0.service
             │ │ ├─libpod-payload-9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6
             │ │ │ ├─75220 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─75222 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─runtime
             │ │   └─75218 /usr/bin/conmon --api-version 1 -c 9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6 -u 9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6/userdata -p /run/containers/storage/overlay-containers/9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 9ce3cdfc68db4310535ef64a87efb40353dcdfbbac71cac592072bd903c643f6
             │ ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.0.service
             │ │ ├─libpod-payload-012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1
             │ │ │ ├─86011 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ └─86013 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ └─runtime
             │ │   └─86009 /usr/bin/conmon --api-version 1 -c 012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1 -u 012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1/userdata -p /run/containers/storage/overlay-containers/012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 012822ae8bedefb05d753efd429cb131456844e02bd9516e891592371e33cae1
             │ ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.1.service
             │ │ ├─libpod-payload-187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a
             │ │ │ ├─87053 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ └─87055 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ └─runtime
             │ │   └─87051 /usr/bin/conmon --api-version 1 -c 187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a -u 187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a/userdata -p /run/containers/storage/overlay-containers/187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 187eb50611d2ddb1b41d9735d4d6534119cb949c4865c9c5b94e777c53253f5a
             │ ├─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.2.service
             │ │ ├─libpod-payload-2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78
             │ │ │ ├─88097 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ │ └─88099 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --osd-objectstore=bluestore
             │ │ └─runtime
             │ │   └─88095 /usr/bin/conmon --api-version 1 -c 2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78 -u 2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78/userdata -p /run/containers/storage/overlay-containers/2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2c04f740a8b253d8ecd8e18a6e47e557960887b7213c8498c38edd5db483ed78
             │ └─ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@rgw.rgw.compute-0.efuxpz.service
             │   ├─libpod-payload-7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d
             │   │ ├─94931 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.efuxpz -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
             │   │ └─94933 /usr/bin/radosgw -n client.rgw.rgw.compute-0.efuxpz -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --stop-timeout=120
             │   └─runtime
             │     └─94929 /usr/bin/conmon --api-version 1 -c 7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d -u 7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d/userdata -p /run/containers/storage/overlay-containers/7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d/userdata/pidfile -n ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf-rgw-rgw-compute-0-efuxpz --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d/userdata/oci-log --conmon-pidfile /run/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf@rgw.rgw.compute-0.efuxpz.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7f00419a4644aef9dce1fa0301835835d4bb386233bc723500dd8ee22076ad4d
             ├─system-getty.slice
             │ └─getty@tty1.service
             │   └─1009 /sbin/agetty -o "-p -- \\u" --noclear - linux
             ├─system-serial\x2dgetty.slice
             │ └─serial-getty@ttyS0.service
             │   └─1010 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             ├─systemd-hostnamed.service
             │ └─265996 /usr/lib/systemd/systemd-hostnamed
             ├─systemd-journald.service
             │ └─675 /usr/lib/systemd/systemd-journald
             ├─systemd-logind.service
             │ └─786 /usr/lib/systemd/systemd-logind
             ├─systemd-machined.service
             │ └─206383 /usr/lib/systemd/systemd-machined
             ├─systemd-udevd.service
             │ └─udev
             │   └─728 /usr/lib/systemd/systemd-udevd
             ├─tuned.service
             │ └─105736 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             ├─virtlogd.service
             │ └─205752 /usr/sbin/virtlogd
             ├─virtnodedevd.service
             │ └─244229 /usr/sbin/virtnodedevd --timeout 120
             └─virtqemud.service
               └─243015 /usr/sbin/virtqemud --timeout 120

Dec 09 16:44:22 compute-0 nova_compute[243452]: 2025-12-09 16:44:22.047 243461 DEBUG oslo_service.periodic_task [None req-8ef5e864-f7dd-4e45-95e0-4532f2566626 - - - - - -] Running periodic task ComputeManager._sync_scheduler_instance_info run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Dec 09 16:44:26 compute-0 rsyslogd[1004]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Dec 09 16:44:27 compute-0 podman[264741]: 2025-12-09 16:44:27.631483359 +0000 UTC m=+0.066612270 container health_status fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:85729a662800e6b42ceb088545fed39a2ac58704b4a37fd540cdef3ebf9e59a2, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.vendor=CentOS, config_id=ovn_metadata_agent, managed_by=edpm_ansible, org.label-schema.build-date=20251202, org.label-schema.license=GPLv2, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, container_name=ovn_metadata_agent, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:85729a662800e6b42ceb088545fed39a2ac58704b4a37fd540cdef3ebf9e59a2', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/config-data/ansible-generated/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', 
'/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)
Dec 09 16:44:27 compute-0 podman[264740]: 2025-12-09 16:44:27.665854033 +0000 UTC m=+0.110580826 container health_status 0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller@sha256:fa24ce4aa285e3632c86a53e8d0385d4c788d049da42dd06570ad9d44aae00de, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller@sha256:fa24ce4aa285e3632c86a53e8d0385d4c788d049da42dd06570ad9d44aae00de', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, container_name=ovn_controller, org.label-schema.build-date=20251202, tcib_managed=true, config_id=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)
Dec 09 16:44:31 compute-0 rsyslogd[1004]: imjournal from <np0005552052:ceph-osd>: begin to drop messages due to rate-limiting
Dec 09 16:44:35 compute-0 rsyslogd[1004]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Dec 09 16:44:35 compute-0 rsyslogd[1004]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Dec 09 16:44:37 compute-0 nova_compute[243452]: 2025-12-09 16:44:37.308 243461 WARNING oslo.service.loopingcall [-] Function 'nova.servicegroup.drivers.db.DbDriver._report_state' run outlasted interval by 5.41 sec[00m
Dec 09 16:44:41 compute-0 podman[266462]: 2025-12-09 16:44:41.76384783 +0000 UTC m=+0.081322047 container health_status 84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2 (image=quay.io/podified-antelope-centos9/openstack-multipathd@sha256:df38dbd6b3eccec2abaa8e3618a385405ccec1b73ae8c3573a138b0c961ed31f, name=multipathd, health_status=healthy, health_failing_streak=0, health_log=, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, config_data={'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/multipathd', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-multipathd@sha256:df38dbd6b3eccec2abaa8e3618a385405ccec1b73ae8c3573a138b0c961ed31f', 'net': 'host', 'privileged': True, 'restart': 'always', 'volumes': ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro', '/dev:/dev', '/run/udev:/run/udev', '/sys:/sys', '/lib/modules:/lib/modules:ro', '/etc/iscsi:/etc/iscsi:ro', '/var/lib/iscsi:/var/lib/iscsi', '/etc/multipath:/etc/multipath:z', '/etc/multipath.conf:/etc/multipath.conf:ro', '/var/lib/openstack/healthchecks/multipathd:/openstack:ro,z']}, managed_by=edpm_ansible, tcib_managed=true, org.label-schema.build-date=20251202, config_id=multipathd, container_name=multipathd, org.label-schema.name=CentOS Stream 9 Base Image)
Dec 09 16:44:56 compute-0 virtqemud[243015]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory

● user-1000.slice - User Slice of UID 1000
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Tue 2025-12-09 15:27:58 UTC; 1h 16min ago
      Until: Tue 2025-12-09 15:27:58 UTC; 1h 16min ago
       Docs: man:user@.service(5)
         IO: 600.7M read, 8.6G written
      Tasks: 37 (limit: 20059)
     Memory: 3.4G (peak: 4.1G)
        CPU: 22min 18.996s
     CGroup: /user.slice/user-1000.slice
             ├─session-1.scope
             │ └─4551 /usr/bin/python3
             ├─session-56.scope
             │ ├─262891 "sshd-session: zuul [priv]"
             │ ├─262913 "sshd-session: zuul@notty"
             │ ├─262914 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ ├─262938 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ ├─268564 timeout 15s turbostat --debug sleep 10
             │ ├─269108 timeout 300s systemctl status --all
             │ ├─269111 systemctl status --all
             │ ├─269431 timeout 300s ceph osd dump --format json-pretty
             │ ├─269433 /usr/bin/python3 -s /usr/bin/ceph osd dump --format json-pretty
             │ ├─269511 timeout --foreground 300s virsh -r nodedev-dumpxml computer
             │ └─269512 virsh -r nodedev-dumpxml computer
             └─user@1000.service
               ├─app.slice
               │ └─dbus-broker.service
               │   ├─12153 /usr/bin/dbus-broker-launch --scope user
               │   └─12182 dbus-broker --log 4 --controller 9 --machine-id 4d4ef2323cc3337bbfd9081b2a323b4e --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               ├─init.scope
               │ ├─4339 /usr/lib/systemd/systemd --user
               │ └─4342 "(sd-pam)"
               └─user.slice
                 └─podman-pause-305cba0a.scope
                   └─12094 catatonit -P

Dec 09 16:37:35 compute-0 sshd-session[256572]: pam_unix(sshd:session): session closed for user zuul
Dec 09 16:37:36 compute-0 sshd-session[257047]: Connection closed by 192.168.122.30 port 44970
Dec 09 16:37:36 compute-0 sshd-session[257044]: pam_unix(sshd:session): session closed for user zuul
Dec 09 16:44:09 compute-0 sudo[262914]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Dec 09 16:44:09 compute-0 sudo[262914]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Dec 09 16:44:18 compute-0 ovs-vsctl[263261]: ovs|00001|db_ctl_base|ERR|no key "dpdk-init" in Open_vSwitch record "." column other_config
Dec 09 16:44:29 compute-0 crontab[264994]: (root) LIST (root)
Dec 09 16:44:46 compute-0 ovs-appctl[267640]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Dec 09 16:44:46 compute-0 ovs-appctl[267650]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Dec 09 16:44:46 compute-0 ovs-appctl[267654]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

● user-42477.slice - User Slice of UID 42477
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Tue 2025-12-09 16:03:32 UTC; 41min ago
      Until: Tue 2025-12-09 16:03:32 UTC; 41min ago
       Docs: man:user@.service(5)
         IO: 2.7M read, 136.9M written
      Tasks: 28 (limit: 20059)
     Memory: 50.4M (peak: 80.4M)
        CPU: 2min 59.250s
     CGroup: /user.slice/user-42477.slice
             ├─session-21.scope
             │ ├─76647 "sshd-session: ceph-admin [priv]"
             │ └─76670 "sshd-session: ceph-admin"
             ├─session-23.scope
             │ ├─76665 "sshd-session: ceph-admin [priv]"
             │ └─76671 "sshd-session: ceph-admin@notty"
             ├─session-24.scope
             │ ├─76697 "sshd-session: ceph-admin [priv]"
             │ └─76700 "sshd-session: ceph-admin@notty"
             ├─session-25.scope
             │ ├─76726 "sshd-session: ceph-admin [priv]"
             │ └─76729 "sshd-session: ceph-admin@notty"
             ├─session-26.scope
             │ ├─76755 "sshd-session: ceph-admin [priv]"
             │ └─76758 "sshd-session: ceph-admin@notty"
             ├─session-27.scope
             │ ├─76784 "sshd-session: ceph-admin [priv]"
             │ └─76787 "sshd-session: ceph-admin@notty"
             ├─session-28.scope
             │ ├─76813 "sshd-session: ceph-admin [priv]"
             │ └─76816 "sshd-session: ceph-admin@notty"
             ├─session-29.scope
             │ ├─76842 "sshd-session: ceph-admin [priv]"
             │ └─76845 "sshd-session: ceph-admin@notty"
             ├─session-30.scope
             │ ├─76871 "sshd-session: ceph-admin [priv]"
             │ └─76874 "sshd-session: ceph-admin@notty"
             ├─session-31.scope
             │ ├─76900 "sshd-session: ceph-admin [priv]"
             │ └─76903 "sshd-session: ceph-admin@notty"
             ├─session-32.scope
             │ ├─76927 "sshd-session: ceph-admin [priv]"
             │ └─76930 "sshd-session: ceph-admin@notty"
             ├─session-33.scope
             │ ├─ 76956 "sshd-session: ceph-admin [priv]"
             │ ├─ 76959 "sshd-session: ceph-admin@notty"
             │ ├─269057 sudo /bin/python3 /var/lib/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86 --timeout 895 ceph-volume --fsid 67f67f44-54fc-54ea-8df0-10931b6ecdaf --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --objectstore bluestore --yes --no-systemd
             │ └─269082 /bin/python3 /var/lib/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86 --timeout 895 ceph-volume --fsid 67f67f44-54fc-54ea-8df0-10931b6ecdaf --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --objectstore bluestore --yes --no-systemd
             └─user@42477.service
               └─init.scope
                 ├─76651 /usr/lib/systemd/systemd --user
                 └─76654 "(sd-pam)"

Dec 09 16:44:55 compute-0 podman[269121]: 2025-12-09 16:44:55.494698428 +0000 UTC m=+0.134465193 container attach 6de51581d8e77a1bc4dbf7916fdfe9005c1866ceba3ee9b9582f2907611c6fda (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=inspiring_pare, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, org.label-schema.license=GPLv2, org.label-schema.build-date=20251030, CEPH_REF=tentacle, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS)
Dec 09 16:44:55 compute-0 podman[269121]: 2025-12-09 16:44:55.496633073 +0000 UTC m=+0.136399818 container died 6de51581d8e77a1bc4dbf7916fdfe9005c1866ceba3ee9b9582f2907611c6fda (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=inspiring_pare, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20251030, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, CEPH_REF=tentacle, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Dec 09 16:44:55 compute-0 podman[269121]: 2025-12-09 16:44:55.535835024 +0000 UTC m=+0.175601769 container remove 6de51581d8e77a1bc4dbf7916fdfe9005c1866ceba3ee9b9582f2907611c6fda (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=inspiring_pare, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20251030, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, OSD_FLAVOR=default, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, CEPH_REF=tentacle, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True)
Dec 09 16:44:55 compute-0 podman[269391]: 2025-12-09 16:44:55.694358369 +0000 UTC m=+0.040353085 container create 4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=funny_jepsen, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, ceph=True, io.buildah.version=1.41.3, CEPH_REF=tentacle, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.build-date=20251030)
Dec 09 16:44:55 compute-0 podman[269391]: 2025-12-09 16:44:55.677284905 +0000 UTC m=+0.023279641 image pull 524f3da276461682bec27427fb8a63b5139c40ad4185939aede197474a6817b3 quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86
Dec 09 16:44:55 compute-0 podman[269391]: 2025-12-09 16:44:55.782562159 +0000 UTC m=+0.128556925 container init 4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=funny_jepsen, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=tentacle, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2, org.label-schema.build-date=20251030, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/)
Dec 09 16:44:55 compute-0 podman[269391]: 2025-12-09 16:44:55.792402088 +0000 UTC m=+0.138396824 container start 4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=funny_jepsen, ceph=True, org.label-schema.build-date=20251030, io.buildah.version=1.41.3, CEPH_REF=tentacle, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Dec 09 16:44:55 compute-0 podman[269391]: 2025-12-09 16:44:55.795948179 +0000 UTC m=+0.141942915 container attach 4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=funny_jepsen, org.label-schema.build-date=20251030, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=tentacle, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default)
Dec 09 16:44:56 compute-0 podman[269391]: 2025-12-09 16:44:56.294547085 +0000 UTC m=+0.640541801 container died 4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=funny_jepsen, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.build-date=20251030, ceph=True, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=tentacle)
Dec 09 16:44:56 compute-0 podman[269391]: 2025-12-09 16:44:56.354778722 +0000 UTC m=+0.700773438 container remove 4323287b9d9f02a12363c9f5cc1c8f6d0d210317f09eb056106c7d354196560f (image=quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86, name=funny_jepsen, CEPH_REF=tentacle, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251030, CEPH_SHA1=69f84cc2651aa259a15bc192ddaabd3baba07489, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)

● user.slice - User and Session Slice
     Loaded: loaded (/usr/lib/systemd/system/user.slice; static)
     Active: active since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
       Docs: man:systemd.special(7)
         IO: 603.5M read, 8.7G written
      Tasks: 65
     Memory: 3.4G (peak: 4.2G)
        CPU: 25min 18.858s
     CGroup: /user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4551 /usr/bin/python3
             │ ├─session-56.scope
             │ │ ├─262891 "sshd-session: zuul [priv]"
             │ │ ├─262913 "sshd-session: zuul@notty"
             │ │ ├─262914 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─262938 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─268564 timeout 15s turbostat --debug sleep 10
             │ │ ├─269108 timeout 300s systemctl status --all
             │ │ ├─269111 systemctl status --all
             │ │ ├─269431 timeout 300s ceph osd dump --format json-pretty
             │ │ ├─269433 /usr/bin/python3 -s /usr/bin/ceph osd dump --format json-pretty
             │ │ ├─269511 timeout --foreground 300s virsh -r nodedev-dumpxml computer
             │ │ └─269512 virsh -r nodedev-dumpxml computer
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─12153 /usr/bin/dbus-broker-launch --scope user
             │   │   └─12182 dbus-broker --log 4 --controller 9 --machine-id 4d4ef2323cc3337bbfd9081b2a323b4e --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4339 /usr/lib/systemd/systemd --user
             │   │ └─4342 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-305cba0a.scope
             │       └─12094 catatonit -P
             └─user-42477.slice
               ├─session-21.scope
               │ ├─76647 "sshd-session: ceph-admin [priv]"
               │ └─76670 "sshd-session: ceph-admin"
               ├─session-23.scope
               │ ├─76665 "sshd-session: ceph-admin [priv]"
               │ └─76671 "sshd-session: ceph-admin@notty"
               ├─session-24.scope
               │ ├─76697 "sshd-session: ceph-admin [priv]"
               │ └─76700 "sshd-session: ceph-admin@notty"
               ├─session-25.scope
               │ ├─76726 "sshd-session: ceph-admin [priv]"
               │ └─76729 "sshd-session: ceph-admin@notty"
               ├─session-26.scope
               │ ├─76755 "sshd-session: ceph-admin [priv]"
               │ └─76758 "sshd-session: ceph-admin@notty"
               ├─session-27.scope
               │ ├─76784 "sshd-session: ceph-admin [priv]"
               │ └─76787 "sshd-session: ceph-admin@notty"
               ├─session-28.scope
               │ ├─76813 "sshd-session: ceph-admin [priv]"
               │ └─76816 "sshd-session: ceph-admin@notty"
               ├─session-29.scope
               │ ├─76842 "sshd-session: ceph-admin [priv]"
               │ └─76845 "sshd-session: ceph-admin@notty"
               ├─session-30.scope
               │ ├─76871 "sshd-session: ceph-admin [priv]"
               │ └─76874 "sshd-session: ceph-admin@notty"
               ├─session-31.scope
               │ ├─76900 "sshd-session: ceph-admin [priv]"
               │ └─76903 "sshd-session: ceph-admin@notty"
               ├─session-32.scope
               │ ├─76927 "sshd-session: ceph-admin [priv]"
               │ └─76930 "sshd-session: ceph-admin@notty"
               ├─session-33.scope
               │ ├─ 76956 "sshd-session: ceph-admin [priv]"
               │ ├─ 76959 "sshd-session: ceph-admin@notty"
               │ ├─269057 sudo /bin/python3 /var/lib/ceph/67f67f44-54fc-54ea-8df0-10931b6ecdaf/cephadm.ed5a13ad26f7f55dd30e9b63855e4e581fd86973bec1d21a12ed0bb26af19c8b --env CEPH_VOLUME_OSDSPEC_AFFINITY=default_drive_group --image quay.io/ceph/ceph@sha256:1228c3d05e45fbc068a8c33614e4409b6dac688bcc77369b06009b5830fa8d86 --timeout 895 ceph-volume --fsid 67f67f44-54fc-54ea-8df0-10931b6ecdaf --config-json - -- lvm batch --no-auto /dev/ceph_vg0/ceph_lv0 /dev/ceph_vg1/ceph_lv1 /dev/ceph_vg2/ceph_lv2 --objectstore bluestore --yes --no-systemd
               │ └─269082 "[python3]"
               └─user@42477.service
                 └─init.scope
                   ├─76651 /usr/lib/systemd/systemd --user
                   └─76654 "(sd-pam)"

● dbus.socket - D-Bus System Message Bus Socket
     Loaded: loaded (/usr/lib/systemd/system/dbus.socket; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
   Triggers: ● dbus-broker.service
     Listen: /run/dbus/system_bus_socket (Stream)
     CGroup: /system.slice/dbus.socket

Dec 09 14:51:37 localhost systemd[1]: Listening on D-Bus System Message Bus Socket.

● dm-event.socket - Device-mapper event daemon FIFOs
     Loaded: loaded (/usr/lib/systemd/system/dm-event.socket; enabled; preset: enabled)
     Active: active (listening) since Tue 2025-12-09 15:55:53 UTC; 49min ago
      Until: Tue 2025-12-09 15:55:53 UTC; 49min ago
   Triggers: ● dm-event.service
       Docs: man:dmeventd(8)
     Listen: /run/dmeventd-server (FIFO)
             /run/dmeventd-client (FIFO)
     CGroup: /system.slice/dm-event.socket

Dec 09 15:55:53 compute-0 systemd[1]: Listening on Device-mapper event daemon FIFOs.

● iscsid.socket - Open-iSCSI iscsid Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsid.socket; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-09 16:19:09 UTC; 25min ago
      Until: Tue 2025-12-09 16:19:09 UTC; 25min ago
   Triggers: ● iscsid.service
       Docs: man:iscsid(8)
             man:iscsiadm(8)
     Listen: @ISCSIADM_ABSTRACT_NAMESPACE (Stream)
     CGroup: /system.slice/iscsid.socket

Dec 09 16:19:09 compute-0 systemd[1]: Listening on Open-iSCSI iscsid Socket.

○ iscsiuio.socket - Open-iSCSI iscsiuio Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● iscsiuio.service
       Docs: man:iscsiuio(8)
     Listen: @ISCSID_UIP_ABSTRACT_NAMESPACE (Stream)

○ libvirtd-admin.socket - libvirt legacy monolithic daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-admin.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)

○ libvirtd-ro.socket - libvirt legacy monolithic daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-ro.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)

○ libvirtd-tls.socket
     Loaded: masked (Reason: Unit libvirtd-tls.socket is masked.)
     Active: inactive (dead)

○ libvirtd.socket - libvirt legacy monolithic daemon socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock (Stream)

● lvm2-lvmpolld.socket - LVM2 poll daemon socket
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.socket; enabled; preset: enabled)
     Active: active (listening) since Tue 2025-12-09 15:55:53 UTC; 49min ago
      Until: Tue 2025-12-09 15:55:53 UTC; 49min ago
   Triggers: ● lvm2-lvmpolld.service
       Docs: man:lvmpolld(8)
     Listen: /run/lvm/lvmpolld.socket (Stream)
     CGroup: /system.slice/lvm2-lvmpolld.socket

Dec 09 15:55:53 compute-0 systemd[1]: Listening on LVM2 poll daemon socket.

● podman.socket - Podman API Socket
     Loaded: loaded (/etc/systemd/system/podman.socket; enabled; preset: disabled)
     Active: active (listening) since Tue 2025-12-09 16:37:08 UTC; 7min ago
      Until: Tue 2025-12-09 16:37:08 UTC; 7min ago
   Triggers: ● podman.service
       Docs: man:podman-system-service(1)
     Listen: /run/podman/podman.sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 4.0K (peak: 512.0K)
        CPU: 4ms
     CGroup: /system.slice/podman.socket

Dec 09 16:37:08 compute-0 systemd[1]: Starting Podman API Socket...
Dec 09 16:37:08 compute-0 systemd[1]: Listening on Podman API Socket.

● rpcbind.socket - RPCbind Server Activation Socket
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.socket; enabled; preset: enabled)
     Active: active (running) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
   Triggers: ● rpcbind.service
     Listen: /run/rpcbind.sock (Stream)
             0.0.0.0:111 (Stream)
             0.0.0.0:111 (Datagram)
             [::]:111 (Stream)
             [::]:111 (Datagram)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 16.0K (peak: 288.0K)
        CPU: 4ms
     CGroup: /system.slice/rpcbind.socket

● sssd-kcm.socket - SSSD Kerberos Cache Manager responder socket
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.socket; enabled; preset: enabled)
     Active: active (listening) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
   Triggers: ● sssd-kcm.service
       Docs: man:sssd-kcm(8)
     Listen: /run/.heim_org.h5l.kcm-socket (Stream)
     CGroup: /system.slice/sssd-kcm.socket

Dec 09 14:51:37 localhost systemd[1]: Listening on SSSD Kerberos Cache Manager responder socket.

○ syslog.socket - Syslog Socket
     Loaded: loaded (/usr/lib/systemd/system/syslog.socket; static)
     Active: inactive (dead)
   Triggers: ● syslog.service
       Docs: man:systemd.special(7)
             https://www.freedesktop.org/wiki/Software/systemd/syslog
     Listen: /run/systemd/journal/syslog (Datagram)

● systemd-coredump.socket - Process Core Dump Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-coredump.socket; static)
     Active: active (listening) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
       Docs: man:systemd-coredump(8)
     Listen: /run/systemd/coredump (SequentialPacket)
   Accepted: 0; Connected: 0;
     CGroup: /system.slice/systemd-coredump.socket

● systemd-initctl.socket - initctl Compatibility Named Pipe
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.socket; static)
     Active: active (listening) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
   Triggers: ● systemd-initctl.service
       Docs: man:systemd-initctl.socket(8)
     Listen: /run/initctl (FIFO)
     CGroup: /system.slice/systemd-initctl.socket

● systemd-journald-dev-log.socket - Journal Socket (/dev/log)
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald-dev-log.socket; static)
     Active: active (running) since Tue 2025-12-09 14:51:34 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:34 UTC; 1h 53min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/dev-log (Datagram)
     CGroup: /system.slice/systemd-journald-dev-log.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-journald.socket - Journal Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.socket; static)
     Active: active (running) since Tue 2025-12-09 14:51:34 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:34 UTC; 1h 53min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/socket (Datagram)
             /run/systemd/journal/stdout (Stream)
     CGroup: /system.slice/systemd-journald.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-rfkill.socket - Load/Save RF Kill Switch Status /dev/rfkill Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.socket; static)
     Active: active (listening) since Tue 2025-12-09 14:51:38 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:38 UTC; 1h 53min ago
   Triggers: ● systemd-rfkill.service
       Docs: man:systemd-rfkill.socket(8)
     Listen: /dev/rfkill (Special)
     CGroup: /system.slice/systemd-rfkill.socket

Dec 09 14:51:38 np0005552052.novalocal systemd[1]: Listening on Load/Save RF Kill Switch Status /dev/rfkill Watch.

● systemd-udevd-control.socket - udev Control Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-control.socket; static)
     Active: active (running) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-control.socket(8)
             man:udev(7)
     Listen: /run/udev/control (SequentialPacket)
     CGroup: /system.slice/systemd-udevd-control.socket

● systemd-udevd-kernel.socket - udev Kernel Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-kernel.socket; static)
     Active: active (running) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-kernel.socket(8)
             man:udev(7)
     Listen: kobject-uevent 1 (Netlink)
     CGroup: /system.slice/systemd-udevd-kernel.socket

○ virtinterfaced-admin.socket - libvirt interface daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-admin-sock (Stream)

○ virtinterfaced-ro.socket - libvirt interface daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock-ro (Stream)

○ virtinterfaced.socket - libvirt interface daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock (Stream)

○ virtlockd-admin.socket - libvirt locking daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-admin-sock (Stream)

● virtlockd.socket - libvirt locking daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.socket; enabled; preset: enabled)
     Active: active (listening) since Tue 2025-12-09 16:18:06 UTC; 26min ago
      Until: Tue 2025-12-09 16:18:06 UTC; 26min ago
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-sock (Stream)
     CGroup: /system.slice/virtlockd.socket

Dec 09 16:18:06 compute-0 systemd[1]: Listening on libvirt locking daemon socket.

● virtlogd-admin.socket - libvirt logging daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd-admin.socket.d
             └─override.conf
     Active: active (running) since Tue 2025-12-09 16:18:02 UTC; 26min ago
      Until: Tue 2025-12-09 16:18:02 UTC; 26min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 0B (peak: 512.0K)
        CPU: 4ms
     CGroup: /system.slice/virtlogd-admin.socket

Dec 09 16:18:02 compute-0 systemd[1]: Starting libvirt logging daemon admin socket...
Dec 09 16:18:02 compute-0 systemd[1]: Listening on libvirt logging daemon admin socket.

● virtlogd.socket - libvirt logging daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd.socket.d
             └─override.conf
     Active: active (running) since Tue 2025-12-09 16:18:02 UTC; 26min ago
      Until: Tue 2025-12-09 16:18:02 UTC; 26min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtlogd.socket

Dec 09 16:18:02 compute-0 systemd[1]: Starting libvirt logging daemon socket...
Dec 09 16:18:02 compute-0 systemd[1]: Listening on libvirt logging daemon socket.

○ virtnetworkd-admin.socket - libvirt network daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-admin-sock (Stream)

○ virtnetworkd-ro.socket - libvirt network daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock-ro (Stream)

○ virtnetworkd.socket - libvirt network daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock (Stream)

● virtnodedevd-admin.socket - libvirt nodedev daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-admin.socket.d
             └─override.conf
     Active: active (running) since Tue 2025-12-09 16:18:03 UTC; 26min ago
      Until: Tue 2025-12-09 16:18:03 UTC; 26min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 0B (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtnodedevd-admin.socket

Dec 09 16:18:03 compute-0 systemd[1]: Starting libvirt nodedev daemon admin socket...
Dec 09 16:18:03 compute-0 systemd[1]: Listening on libvirt nodedev daemon admin socket.

● virtnodedevd-ro.socket - libvirt nodedev daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-ro.socket.d
             └─override.conf
     Active: active (running) since Tue 2025-12-09 16:18:03 UTC; 26min ago
      Until: Tue 2025-12-09 16:18:03 UTC; 26min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtnodedevd-ro.socket

Dec 09 16:18:03 compute-0 systemd[1]: Starting libvirt nodedev daemon read-only socket...
Dec 09 16:18:03 compute-0 systemd[1]: Listening on libvirt nodedev daemon read-only socket.

● virtnodedevd.socket - libvirt nodedev daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd.socket.d
             └─override.conf
     Active: active (running) since Tue 2025-12-09 16:18:03 UTC; 26min ago
      Until: Tue 2025-12-09 16:18:03 UTC; 26min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtnodedevd.socket

Dec 09 16:18:03 compute-0 systemd[1]: Starting libvirt nodedev daemon socket...
Dec 09 16:18:03 compute-0 systemd[1]: Listening on libvirt nodedev daemon socket.

○ virtnwfilterd-admin.socket - libvirt nwfilter daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-admin-sock (Stream)

○ virtnwfilterd-ro.socket - libvirt nwfilter daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock-ro (Stream)

○ virtnwfilterd.socket - libvirt nwfilter daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock (Stream)

● virtproxyd-admin.socket - libvirt proxy daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-admin.socket.d
             └─override.conf
     Active: active (listening) since Tue 2025-12-09 16:18:05 UTC; 26min ago
      Until: Tue 2025-12-09 16:18:05 UTC; 26min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 4.0K (peak: 512.0K)
        CPU: 4ms
     CGroup: /system.slice/virtproxyd-admin.socket

Dec 09 16:18:05 compute-0 systemd[1]: Starting libvirt proxy daemon admin socket...
Dec 09 16:18:05 compute-0 systemd[1]: Listening on libvirt proxy daemon admin socket.

● virtproxyd-ro.socket - libvirt proxy daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-ro.socket.d
             └─override.conf
     Active: active (listening) since Tue 2025-12-09 16:18:05 UTC; 26min ago
      Until: Tue 2025-12-09 16:18:05 UTC; 26min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 4.0K (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtproxyd-ro.socket

Dec 09 16:18:05 compute-0 systemd[1]: Starting libvirt proxy daemon read-only socket...
Dec 09 16:18:05 compute-0 systemd[1]: Listening on libvirt proxy daemon read-only socket.

● virtproxyd-tls.socket - libvirt proxy daemon TLS IP socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-tls.socket; enabled; preset: disabled)
     Active: active (listening) since Tue 2025-12-09 16:16:54 UTC; 28min ago
      Until: Tue 2025-12-09 16:16:54 UTC; 28min ago
   Triggers: ● virtproxyd.service
     Listen: [::]:16514 (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 8.0K (peak: 256.0K)
        CPU: 1ms
     CGroup: /system.slice/virtproxyd-tls.socket

Dec 09 16:16:54 compute-0 systemd[1]: Listening on libvirt proxy daemon TLS IP socket.

● virtproxyd.socket - libvirt proxy daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd.socket.d
             └─override.conf
     Active: active (listening) since Tue 2025-12-09 16:16:54 UTC; 28min ago
      Until: Tue 2025-12-09 16:16:54 UTC; 28min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock (Stream)
     CGroup: /system.slice/virtproxyd.socket

Dec 09 16:16:54 compute-0 systemd[1]: Listening on libvirt proxy daemon socket.

● virtqemud-admin.socket - libvirt QEMU daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-admin.socket.d
             └─override.conf
     Active: active (running) since Tue 2025-12-09 16:18:06 UTC; 26min ago
      Until: Tue 2025-12-09 16:18:06 UTC; 26min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 0B (peak: 576.0K)
        CPU: 3ms
     CGroup: /system.slice/virtqemud-admin.socket

Dec 09 16:18:06 compute-0 systemd[1]: Starting libvirt QEMU daemon admin socket...
Dec 09 16:18:06 compute-0 systemd[1]: Listening on libvirt QEMU daemon admin socket.

● virtqemud-ro.socket - libvirt QEMU daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-ro.socket.d
             └─override.conf
     Active: active (running) since Tue 2025-12-09 16:18:06 UTC; 26min ago
      Until: Tue 2025-12-09 16:18:06 UTC; 26min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 0B (peak: 480.0K)
        CPU: 2ms
     CGroup: /system.slice/virtqemud-ro.socket

Dec 09 16:18:06 compute-0 systemd[1]: Starting libvirt QEMU daemon read-only socket...
Dec 09 16:18:06 compute-0 systemd[1]: Listening on libvirt QEMU daemon read-only socket.

● virtqemud.socket - libvirt QEMU daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud.socket.d
             └─override.conf
     Active: active (running) since Tue 2025-12-09 16:18:06 UTC; 26min ago
      Until: Tue 2025-12-09 16:18:06 UTC; 26min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 0B (peak: 484.0K)
        CPU: 5ms
     CGroup: /system.slice/virtqemud.socket

Dec 09 16:18:06 compute-0 systemd[1]: Starting libvirt QEMU daemon socket...
Dec 09 16:18:06 compute-0 systemd[1]: Listening on libvirt QEMU daemon socket.

● virtsecretd-admin.socket - libvirt secret daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-admin.socket.d
             └─override.conf
     Active: active (listening) since Tue 2025-12-09 16:18:07 UTC; 26min ago
      Until: Tue 2025-12-09 16:18:07 UTC; 26min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 0B (peak: 560.0K)
        CPU: 5ms
     CGroup: /system.slice/virtsecretd-admin.socket

Dec 09 16:18:07 compute-0 systemd[1]: Starting libvirt secret daemon admin socket...
Dec 09 16:18:07 compute-0 systemd[1]: Listening on libvirt secret daemon admin socket.

● virtsecretd-ro.socket - libvirt secret daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-ro.socket.d
             └─override.conf
     Active: active (listening) since Tue 2025-12-09 16:18:07 UTC; 26min ago
      Until: Tue 2025-12-09 16:18:07 UTC; 26min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 0B (peak: 512.0K)
        CPU: 5ms
     CGroup: /system.slice/virtsecretd-ro.socket

Dec 09 16:18:07 compute-0 systemd[1]: Starting libvirt secret daemon read-only socket...
Dec 09 16:18:07 compute-0 systemd[1]: Listening on libvirt secret daemon read-only socket.

● virtsecretd.socket - libvirt secret daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd.socket.d
             └─override.conf
     Active: active (listening) since Tue 2025-12-09 16:18:07 UTC; 26min ago
      Until: Tue 2025-12-09 16:18:07 UTC; 26min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48628)
     Memory: 0B (peak: 512.0K)
        CPU: 5ms
     CGroup: /system.slice/virtsecretd.socket

Dec 09 16:18:07 compute-0 systemd[1]: Starting libvirt secret daemon socket...
Dec 09 16:18:07 compute-0 systemd[1]: Listening on libvirt secret daemon socket.

○ virtstoraged-admin.socket - libvirt storage daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-admin-sock (Stream)

○ virtstoraged-ro.socket - libvirt storage daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock-ro (Stream)

○ virtstoraged.socket - libvirt storage daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock (Stream)

● swap.swap - /swap
     Loaded: loaded (/etc/fstab; generated)
     Active: active since Tue 2025-12-09 15:57:52 UTC; 47min ago
      Until: Tue 2025-12-09 15:57:52 UTC; 47min ago
       What: /swap
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● basic.target - Basic System
     Loaded: loaded (/usr/lib/systemd/system/basic.target; static)
     Active: active since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:systemd.special(7)

Dec 09 14:51:37 localhost systemd[1]: Reached target Basic System.

○ blockdev@dev-disk-by\x2duuid-fcf6b761\x2d831a\x2d48a7\x2d9f5f\x2d068b5063763f.target - Block Device Preparation for /dev/disk/by-uuid/fcf6b761-831a-48a7-9f5f-068b5063763f
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ blockdev@dev-vda1.target - Block Device Preparation for /dev/vda1
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf.target - Ceph cluster 67f67f44-54fc-54ea-8df0-10931b6ecdaf
     Loaded: loaded (/etc/systemd/system/ceph-67f67f44-54fc-54ea-8df0-10931b6ecdaf.target; enabled; preset: disabled)
     Active: active since Tue 2025-12-09 16:03:00 UTC; 41min ago
      Until: Tue 2025-12-09 16:03:00 UTC; 41min ago

Dec 09 16:03:00 compute-0 systemd[1]: Reached target Ceph cluster 67f67f44-54fc-54ea-8df0-10931b6ecdaf.

● ceph.target - All Ceph clusters and services
     Loaded: loaded (/etc/systemd/system/ceph.target; enabled; preset: disabled)
     Active: active since Tue 2025-12-09 16:03:00 UTC; 41min ago
      Until: Tue 2025-12-09 16:03:00 UTC; 41min ago

Dec 09 16:03:00 compute-0 systemd[1]: Reached target All Ceph clusters and services.

● cloud-config.target - Cloud-config availability
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.target; static)
     Active: active since Tue 2025-12-09 14:51:40 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:40 UTC; 1h 53min ago

Dec 09 14:51:40 np0005552052.novalocal systemd[1]: Reached target Cloud-config availability.

● cloud-init.target - Cloud-init target
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.target; enabled-runtime; preset: disabled)
     Active: active since Tue 2025-12-09 14:51:41 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:41 UTC; 1h 53min ago

Dec 09 14:51:41 np0005552052.novalocal systemd[1]: Reached target Cloud-init target.

○ cryptsetup-pre.target - Local Encrypted Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● cryptsetup.target - Local Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup.target; static)
     Active: active since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
       Docs: man:systemd.special(7)

● edpm_libvirt.target
     Loaded: loaded (/etc/systemd/system/edpm_libvirt.target; static)
     Active: active since Tue 2025-12-09 16:18:41 UTC; 26min ago
      Until: Tue 2025-12-09 16:18:41 UTC; 26min ago

Dec 09 16:18:41 compute-0 systemd[1]: Reached target edpm_libvirt.target.

○ emergency.target - Emergency Mode
     Loaded: loaded (/usr/lib/systemd/system/emergency.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ first-boot-complete.target - First Boot Complete
     Loaded: loaded (/usr/lib/systemd/system/first-boot-complete.target; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:systemd.special(7)

Dec 09 14:51:37 localhost systemd[1]: First Boot Complete was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ getty-pre.target - Preparation for Logins
     Loaded: loaded (/usr/lib/systemd/system/getty-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

● getty.target - Login Prompts
     Loaded: loaded (/usr/lib/systemd/system/getty.target; static)
     Active: active since Tue 2025-12-09 14:51:40 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:40 UTC; 1h 53min ago
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

Dec 09 14:51:40 np0005552052.novalocal systemd[1]: Reached target Login Prompts.

○ graphical.target - Graphical Interface
     Loaded: loaded (/usr/lib/systemd/system/graphical.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ initrd-fs.target - Initrd File Systems
     Loaded: loaded (/usr/lib/systemd/system/initrd-fs.target; static)
     Active: inactive (dead) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
       Docs: man:systemd.special(7)

Dec 09 14:51:36 localhost systemd[1]: Reached target Initrd File Systems.

○ initrd-root-device.target - Initrd Root Device
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-device.target; static)
     Active: inactive (dead) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
       Docs: man:systemd.special(7)

Dec 09 14:51:35 localhost systemd[1]: Reached target Initrd Root Device.
Dec 09 14:51:36 localhost systemd[1]: Stopped target Initrd Root Device.

○ initrd-root-fs.target - Initrd Root File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-fs.target; static)
     Active: inactive (dead) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
       Docs: man:systemd.special(7)

Dec 09 14:51:36 localhost systemd[1]: Reached target Initrd Root File System.

○ initrd-switch-root.target - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.target; static)
     Active: inactive (dead) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago

Dec 09 14:51:36 localhost systemd[1]: Reached target Switch Root.

○ initrd-usr-fs.target - Initrd /usr File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-usr-fs.target; static)
     Active: inactive (dead) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
       Docs: man:systemd.special(7)

Dec 09 14:51:36 localhost systemd[1]: Stopped target Initrd /usr File System.
Notice: journal has been rotated since unit was started, output may be incomplete.

○ initrd.target - Initrd Default Target
     Loaded: loaded (/usr/lib/systemd/system/initrd.target; static)
     Active: inactive (dead) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
       Docs: man:systemd.special(7)

Dec 09 14:51:36 localhost systemd[1]: Reached target Initrd Default Target.
Dec 09 14:51:36 localhost systemd[1]: Stopped target Initrd Default Target.

● integritysetup.target - Local Integrity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/integritysetup.target; static)
     Active: active since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
       Docs: man:systemd.special(7)

● local-fs-pre.target - Preparation for Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs-pre.target; static)
     Active: active since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:systemd.special(7)

Dec 09 14:51:37 localhost systemd[1]: Reached target Preparation for Local File Systems.

● local-fs.target - Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs.target; static)
     Active: active since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:systemd.special(7)

Dec 09 14:51:37 localhost systemd[1]: Reached target Local File Systems.

● multi-user.target - Multi-User System
     Loaded: loaded (/usr/lib/systemd/system/multi-user.target; indirect; preset: disabled)
     Active: active since Tue 2025-12-09 14:51:40 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:40 UTC; 1h 53min ago
       Docs: man:systemd.special(7)

Dec 09 14:51:40 np0005552052.novalocal systemd[1]: Reached target Multi-User System.

● network-online.target - Network is Online
     Loaded: loaded (/usr/lib/systemd/system/network-online.target; static)
     Active: active since Tue 2025-12-09 14:51:40 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:40 UTC; 1h 53min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Dec 09 14:51:40 np0005552052.novalocal systemd[1]: Reached target Network is Online.

● network-pre.target - Preparation for Network
     Loaded: loaded (/usr/lib/systemd/system/network-pre.target; static)
     Active: active since Tue 2025-12-09 14:51:38 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:38 UTC; 1h 53min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Dec 09 14:51:38 np0005552052.novalocal systemd[1]: Reached target Preparation for Network.

● network.target - Network
     Loaded: loaded (/usr/lib/systemd/system/network.target; static)
     Active: active since Tue 2025-12-09 14:51:38 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:38 UTC; 1h 53min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Dec 09 14:51:38 np0005552052.novalocal systemd[1]: Reached target Network.

● nfs-client.target - NFS client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-client.target; enabled; preset: disabled)
     Active: active since Tue 2025-12-09 14:51:38 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:38 UTC; 1h 53min ago

Dec 09 14:51:38 np0005552052.novalocal systemd[1]: Reached target NFS client services.

○ nss-lookup.target - Host and Network Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-lookup.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● nss-user-lookup.target - User and Group Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-user-lookup.target; static)
     Active: active since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:systemd.special(7)

Dec 09 14:51:37 localhost systemd[1]: Reached target User and Group Name Lookups.

● paths.target - Path Units
     Loaded: loaded (/usr/lib/systemd/system/paths.target; static)
     Active: active since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
       Docs: man:systemd.special(7)

○ remote-cryptsetup.target - Remote Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-cryptsetup.target; disabled; preset: enabled)
     Active: inactive (dead) since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
       Docs: man:systemd.special(7)

Dec 09 14:51:35 localhost systemd[1]: Reached target Remote Encrypted Volumes.
Dec 09 14:51:36 localhost systemd[1]: Stopped target Remote Encrypted Volumes.

● remote-fs-pre.target - Preparation for Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs-pre.target; static)
     Active: active since Tue 2025-12-09 14:51:38 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:38 UTC; 1h 53min ago
       Docs: man:systemd.special(7)

Dec 09 14:51:38 np0005552052.novalocal systemd[1]: Reached target Preparation for Remote File Systems.

● remote-fs.target - Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs.target; enabled; preset: enabled)
     Active: active since Tue 2025-12-09 14:51:38 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:38 UTC; 1h 53min ago
       Docs: man:systemd.special(7)

Dec 09 14:51:38 np0005552052.novalocal systemd[1]: Reached target Remote File Systems.

○ remote-veritysetup.target - Remote Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-veritysetup.target; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ rescue.target - Rescue Mode
     Loaded: loaded (/usr/lib/systemd/system/rescue.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● rpc_pipefs.target
     Loaded: loaded (/usr/lib/systemd/system/rpc_pipefs.target; static)
     Active: active since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:36 UTC; 1h 53min ago

● rpcbind.target - RPC Port Mapper
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.target; static)
     Active: active since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:Unit syslog.target could not be found.
36 UTC; 1h 53min ago
       Docs: man:systemd.special(7)

○ shutdown.target - System Shutdown
     Loaded: loaded (/usr/lib/systemd/system/shutdown.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● slices.target - Slice Units
     Loaded: loaded (/usr/lib/systemd/system/slices.target; static)
     Active: active since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
       Docs: man:systemd.special(7)

● sockets.target - Socket Units
     Loaded: loaded (/usr/lib/systemd/system/sockets.target; static)
     Active: active since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:systemd.special(7)

Dec 09 14:51:37 localhost systemd[1]: Reached target Socket Units.

● sshd-keygen.target
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen.target; static)
     Active: active since Tue 2025-12-09 16:16:36 UTC; 28min ago
      Until: Tue 2025-12-09 16:16:36 UTC; 28min ago

Dec 09 16:16:36 compute-0 systemd[1]: Reached target sshd-keygen.target.

● swap.target - Swaps
     Loaded: loaded (/usr/lib/systemd/system/swap.target; static)
     Active: active since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
       Docs: man:systemd.special(7)

● sysinit.target - System Initialization
     Loaded: loaded (/usr/lib/systemd/system/sysinit.target; static)
     Active: active since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:systemd.special(7)

Dec 09 14:51:37 localhost systemd[1]: Reached target System Initialization.

● time-set.target - System Time Set
     Loaded: loaded (/usr/lib/systemd/system/time-set.target; static)
     Active: active since Tue 2025-12-09 16:03:01 UTC; 41min ago
      Until: Tue 2025-12-09 16:03:01 UTC; 41min ago
       Docs: man:systemd.special(7)

Dec 09 16:03:01 compute-0 systemd[1]: Reached target System Time Set.

● time-sync.target - System Time Synchronized
     Loaded: loaded (/usr/lib/systemd/system/time-sync.target; static)
     Active: active since Tue 2025-12-09 16:03:01 UTC; 41min ago
      Until: Tue 2025-12-09 16:03:01 UTC; 41min ago
       Docs: man:systemd.special(7)

Dec 09 16:03:01 compute-0 systemd[1]: Reached target System Time Synchronized.

● timers.target - Timer Units
     Loaded: loaded (/usr/lib/systemd/system/timers.target; static)
     Active: active since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
       Docs: man:systemd.special(7)

Dec 09 14:51:37 localhost systemd[1]: Reached target Timer Units.

○ umount.target - Unmount All Filesystems
     Loaded: loaded (/usr/lib/systemd/system/umount.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ veritysetup-pre.target - Local Verity Protected Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/veritysetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● veritysetup.target - Local Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/veritysetup.target; static)
     Active: active since Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:36 UTC; 1h 53min ago
       Docs: man:systemd.special(7)

○ virt-guest-shutdown.target - Libvirt guests shutdown
     Loaded: loaded (/etc/systemd/system/virt-guest-shutdown.target; static)
     Active: inactive (dead)
       Docs: https://libvirt.org

● 0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470-46e7fcdfdf3aba34.timer - /usr/bin/podman healthcheck run 0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470
     Loaded: loaded (/run/systemd/transient/0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470-46e7fcdfdf3aba34.timer; transient)
  Transient: yes
     Active: active (waiting) since Tue 2025-12-09 16:13:10 UTC; 31min ago
      Until: Tue 2025-12-09 16:13:10 UTC; 31min ago
    Trigger: Tue 2025-12-09 16:44:57 UTC; 840ms left
   Triggers: ● 0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470-46e7fcdfdf3aba34.service

Dec 09 16:13:10 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 0a76737b5f2b25872fe2b565002617e1a84450afb10d979302e98619d30d6470.

● 84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2-c203cf0939cbbd6.timer - /usr/bin/podman healthcheck run 84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2
     Loaded: loaded (/run/systemd/transient/84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2-c203cf0939cbbd6.timer; transient)
  Transient: yes
     Active: active (waiting) since Tue 2025-12-09 16:20:03 UTC; 24min ago
      Until: Tue 2025-12-09 16:20:03 UTC; 24min ago
    Trigger: Tue 2025-12-09 16:45:11 UTC; 14s left
   Triggers: ● 84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2-c203cf0939cbbd6.service

Dec 09 16:20:03 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 84d676632bb4f080a989766472a2cc2fcf267803021f3bcdf51f0d8bfc9055e2.

● dnf-makecache.timer - dnf makecache --timer
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.timer; enabled; preset: enabled)
     Active: active (waiting) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
    Trigger: Tue 2025-12-09 16:59:39 UTC; 14min left
   Triggers: ● dnf-makecache.service

Dec 09 14:51:37 localhost systemd[1]: Started dnf makecache --timer.

● fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692-413c4d33667cee78.timer - /usr/bin/podman healthcheck run fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692
     Loaded: loaded (/run/systemd/transient/fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692-413c4d33667cee78.timer; transient)
  Transient: yes
     Active: active (waiting) since Tue 2025-12-09 16:14:15 UTC; 30min ago
      Until: Tue 2025-12-09 16:14:15 UTC; 30min ago
    Trigger: Tue 2025-12-09 16:44:57 UTC; 789ms left
   Triggers: ● fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692-413c4d33667cee78.service

Dec 09 16:14:15 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run fbe906a4b60641b0d2bc026295092273d9a8b9783389304fc11b10bffae9c692.

● logrotate.timer - Daily rotation of log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.timer; enabled; preset: enabled)
     Active: active (waiting) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
    Trigger: Wed 2025-12-10 00:00:00 UTC; 7h left
   Triggers: ● logrotate.service
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

Dec 09 14:51:37 localhost systemd[1]: Started Daily rotation of log files.

○ raid-check.timer - Weekly RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.timer; enabled; preset: enabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● raid-check.service

○ sysstat-collect.timer - Run system activity accounting tool every 10 minutes
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-collect.service

○ sysstat-summary.timer - Generate summary of yesterday's process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-summary.service

● systemd-tmpfiles-clean.timer - Daily Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.timer; static)
     Active: active (waiting) since Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
      Until: Tue 2025-12-09 14:51:37 UTC; 1h 53min ago
    Trigger: Wed 2025-12-10 15:06:46 UTC; 22h left
   Triggers: ● systemd-tmpfiles-clean.service
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)

Dec 09 14:51:37 localhost systemd[1]: Started Daily Cleanup of Temporary Directories.

● unbound-anchor.timer - daily update of the root trust anchor for DNSSEC
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.timer; enabled; preset: enabled)
     Active: active (waiting) since Tue 2025-12-09 15:58:24 UTC; 46min ago
      Until: Tue 2025-12-09 15:58:24 UTC; 46min ago
    Trigger: Wed 2025-12-10 00:00:00 UTC; 7h left
   Triggers: ● unbound-anchor.service
       Docs: man:unbound-anchor(8)

Dec 09 15:58:24 compute-0 systemd[1]: Started daily update of the root trust anchor for DNSSEC.
