● compute-1
    State: running
    Units: 447 loaded (incl. loaded aliases)
     Jobs: 0 queued
   Failed: 0 units
    Since: Tue 2026-01-27 07:47:05 UTC; 1h 35min ago
  systemd: 252-64.el9
   CGroup: /
           ├─240109 turbostat --debug sleep 10
           ├─240112 sleep 10
           ├─init.scope
           │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
           ├─machine.slice
           │ ├─libpod-a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a.scope
           │ │ └─container
           │ │   ├─221468 dumb-init --single-child -- kolla_start
           │ │   └─221470 /usr/bin/python3 /usr/bin/nova-compute
           │ ├─libpod-d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1.scope
           │ │ └─container
           │ │   ├─139975 dumb-init --single-child -- kolla_start
           │ │   ├─139978 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │ │   ├─140319 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │ │   └─140418 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpm4k7fsyh/privsep.sock
           │ └─libpod-f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d.scope
           │   └─container
           │     ├─130303 dumb-init --single-child -- kolla_start
           │     └─130306 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
           ├─system.slice
           │ ├─NetworkManager.service
           │ │ └─48943 /usr/sbin/NetworkManager --no-daemon
           │ ├─auditd.service
           │ │ ├─703 /sbin/auditd
           │ │ └─705 /usr/sbin/sedispatch
           │ ├─chronyd.service
           │ │ └─58510 /usr/sbin/chronyd -F 2
           │ ├─crond.service
           │ │ └─1011 /usr/sbin/crond -n
           │ ├─dbus-broker.service
           │ │ ├─744 /usr/bin/dbus-broker-launch --scope system --audit
           │ │ └─773 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
           │ ├─edpm_nova_compute.service
           │ │ └─221466 /usr/bin/conmon --api-version 1 -c a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a -u a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a/userdata -p /run/containers/storage/overlay-containers/a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a
           │ ├─edpm_ovn_controller.service
           │ │ └─130301 /usr/bin/conmon --api-version 1 -c f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d -u f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d/userdata -p /run/containers/storage/overlay-containers/f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d
           │ ├─edpm_ovn_metadata_agent.service
           │ │ └─139973 /usr/bin/conmon --api-version 1 -c d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1 -u d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1/userdata -p /run/containers/storage/overlay-containers/d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1
           │ ├─gssproxy.service
           │ │ └─872 /usr/sbin/gssproxy -D
           │ ├─irqbalance.service
           │ │ └─788 /usr/sbin/irqbalance
           │ ├─iscsid.service
           │ │ └─206933 /usr/sbin/iscsid -f
           │ ├─multipathd.service
           │ │ └─207091 /sbin/multipathd -d -s
           │ ├─ovs-vswitchd.service
           │ │ └─47250 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
           │ ├─ovsdb-server.service
           │ │ └─47167 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
           │ ├─polkit.service
           │ │ └─43431 /usr/lib/polkit-1/polkitd --no-debug
           │ ├─rpcbind.service
           │ │ └─701 /usr/bin/rpcbind -w -f
           │ ├─rsyslog.service
           │ │ └─1007 /usr/sbin/rsyslogd -n
           │ ├─sshd.service
           │ │ └─165764 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
           │ ├─system-ceph\x2d281e9bde\x2d2795\x2d59f4\x2d98ac\x2d90cf5b49a2de.slice
           │ │ ├─ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@crash.compute-1.service
           │ │ │ ├─libpod-payload-8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c
           │ │ │ │ ├─76982 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-1
           │ │ │ │ └─76984 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-1
           │ │ │ └─runtime
           │ │ │   └─76980 /usr/bin/conmon --api-version 1 -c 8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c -u 8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c/userdata -p /run/containers/storage/overlay-containers/8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-crash-compute-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@crash.compute-1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c
           │ │ ├─ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mds.cephfs.compute-1.taxacd.service
           │ │ │ ├─libpod-payload-3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211
           │ │ │ │ ├─84857 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-1.taxacd -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─84859 /usr/bin/ceph-mds -n mds.cephfs.compute-1.taxacd -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─84855 /usr/bin/conmon --api-version 1 -c 3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211 -u 3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211/userdata -p /run/containers/storage/overlay-containers/3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-mds-cephfs-compute-1-taxacd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mds.cephfs.compute-1.taxacd.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211
           │ │ ├─ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mgr.compute-1.jqbgxp.service
           │ │ │ ├─libpod-payload-2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d
           │ │ │ │ ├─81955 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-1.jqbgxp -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─81957 /usr/bin/ceph-mgr -n mgr.compute-1.jqbgxp -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─81953 /usr/bin/conmon --api-version 1 -c 2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d -u 2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d/userdata -p /run/containers/storage/overlay-containers/2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-mgr-compute-1-jqbgxp --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mgr.compute-1.jqbgxp.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d
           │ │ ├─ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mon.compute-1.service
           │ │ │ ├─libpod-payload-251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd
           │ │ │ │ ├─81594 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ │ └─81596 /usr/bin/ceph-mon -n mon.compute-1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─81592 /usr/bin/conmon --api-version 1 -c 251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd -u 251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd/userdata -p /run/containers/storage/overlay-containers/251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-mon-compute-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mon.compute-1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd
           │ │ ├─ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@osd.1.service
           │ │ │ ├─libpod-payload-6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef
           │ │ │ │ ├─78931 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─78933 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─78929 /usr/bin/conmon --api-version 1 -c 6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef -u 6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef/userdata -p /run/containers/storage/overlay-containers/6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef
           │ │ └─ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@rgw.rgw.compute-1.nigpsg.service
           │ │   ├─libpod-payload-39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249
           │ │   │ ├─83598 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-1.nigpsg -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │   │ └─83600 /usr/bin/radosgw -n client.rgw.rgw.compute-1.nigpsg -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │   └─runtime
           │ │     └─83596 /usr/bin/conmon --api-version 1 -c 39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249 -u 39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249/userdata -p /run/containers/storage/overlay-containers/39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-rgw-rgw-compute-1-nigpsg --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@rgw.rgw.compute-1.nigpsg.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249
           │ ├─system-getty.slice
           │ │ └─getty@tty1.service
           │ │   └─1012 /sbin/agetty -o "-p -- \\u" --noclear - linux
           │ ├─system-serial\x2dgetty.slice
           │ │ └─serial-getty@ttyS0.service
           │ │   └─1013 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
           │ ├─systemd-hostnamed.service
           │ │ └─237147 /usr/lib/systemd/systemd-hostnamed
           │ ├─systemd-journald.service
           │ │ └─680 /usr/lib/systemd/systemd-journald
           │ ├─systemd-logind.service
           │ │ └─806 /usr/lib/systemd/systemd-logind
           │ ├─systemd-machined.service
           │ │ └─190815 /usr/lib/systemd/systemd-machined
           │ ├─systemd-udevd.service
           │ │ └─udev
           │ │   └─731 /usr/lib/systemd/systemd-udevd
           │ ├─tuned.service
           │ │ └─92931 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
           │ ├─virtlogd.service
           │ │ └─190186 /usr/sbin/virtlogd
           │ ├─virtnodedevd.service
           │ │ └─222034 /usr/sbin/virtnodedevd --timeout 120
           │ └─virtqemud.service
           │   └─221900 /usr/sbin/virtqemud --timeout 120
           └─user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4519 /usr/bin/python3
             │ ├─session-52.scope
             │ │ ├─234639 "sshd-session: zuul [priv]"
             │ │ ├─234642 "sshd-session: zuul@notty"
             │ │ ├─234643 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─234667 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─240108 timeout 15s turbostat --debug sleep 10
             │ │ ├─240520 timeout 300s ceph fs dump --format json-pretty
             │ │ ├─240521 /usr/bin/python3 -s /usr/bin/ceph fs dump --format json-pretty
             │ │ ├─240522 timeout 300s semanage module -l
             │ │ ├─240523 /usr/bin/python3 -EsI /usr/sbin/semanage module -l
             │ │ ├─240545 timeout 300s systemctl status --all
             │ │ └─240546 systemctl status --all
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─29571 /usr/bin/dbus-broker-launch --scope user
             │   │   └─29572 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4307 /usr/lib/systemd/systemd --user
             │   │ └─4309 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-444cdfba.scope
             │       └─29556 catatonit -P
             └─user-42477.slice
               ├─session-21.scope
               │ ├─72547 "sshd-session: ceph-admin [priv]"
               │ └─72571 "sshd-session: ceph-admin"
               ├─session-23.scope
               │ ├─72565 "sshd-session: ceph-admin [priv]"
               │ └─72572 "sshd-session: ceph-admin@notty"
               ├─session-24.scope
               │ ├─72623 "sshd-session: ceph-admin [priv]"
               │ └─72626 "sshd-session: ceph-admin@notty"
               ├─session-25.scope
               │ ├─72677 "sshd-session: ceph-admin [priv]"
               │ └─72680 "sshd-session: ceph-admin@notty"
               ├─session-26.scope
               │ ├─72731 "sshd-session: ceph-admin [priv]"
               │ └─72734 "sshd-session: ceph-admin@notty"
               ├─session-27.scope
               │ ├─72785 "sshd-session: ceph-admin [priv]"
               │ └─72788 "sshd-session: ceph-admin@notty"
               ├─session-28.scope
               │ ├─72839 "sshd-session: ceph-admin [priv]"
               │ └─72842 "sshd-session: ceph-admin@notty"
               ├─session-29.scope
               │ ├─72893 "sshd-session: ceph-admin [priv]"
               │ └─72896 "sshd-session: ceph-admin@notty"
               ├─session-30.scope
               │ ├─72947 "sshd-session: ceph-admin [priv]"
                │ └─72950 "sshd-session: ceph-admin@notty"
Unit boot.automount could not be found.
               ├─session-31.scope
               │ ├─73001 "sshd-session: ceph-admin [priv]"
               │ └─73004 "sshd-session: ceph-admin@notty"
               ├─session-32.scope
               │ ├─73028 "sshd-session: ceph-admin [priv]"
               │ └─73031 "sshd-session: ceph-admin@notty"
               ├─session-33.scope
               │ ├─73082 "sshd-session: ceph-admin [priv]"
               │ └─73085 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─72551 /usr/lib/systemd/systemd --user
                   └─72553 "(sd-pam)"

● proc-sys-fs-binfmt_misc.automount - Arbitrary Executable File Formats File System Automount Point
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.automount; static)
     Active: active (running) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
   Triggers: ● proc-sys-fs-binfmt_misc.mount
      Where: /proc/sys/fs/binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Jan 27 08:28:52 compute-1 systemd[1]: proc-sys-fs-binfmt_misc.automount: Got automount request for /proc/sys/fs/binfmt_misc, triggered by 73526 (sysctl)

● dev-cdrom.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ceph_vg0-ceph_lv0.device - /dev/ceph_vg0/ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:26:34 UTC; 56min ago
      Until: Tue 2026-01-27 08:26:34 UTC; 56min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2ddiskseq-1.device - /dev/disk/by-diskseq/1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2ddiskseq-3.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2ddiskseq-5.device - /dev/disk/by-diskseq/5
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:26:34 UTC; 56min ago
      Until: Tue 2026-01-27 08:26:34 UTC; 56min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2ddiskseq-6.device - /dev/disk/by-diskseq/6
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:26:34 UTC; 56min ago
      Until: Tue 2026-01-27 08:26:34 UTC; 56min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-ata\x2dQEMU_DVD\x2dROM_QM00001.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg0\x2dceph_lv0.device - /dev/disk/by-id/dm-name-ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:26:34 UTC; 56min ago
      Until: Tue 2026-01-27 08:26:34 UTC; 56min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2dKJzEK2E93WxvnUIizExaSRHlRkzq9CcsDu2duBlKpOOkwgc7T7bK3LX6UyubFvre.device - /dev/disk/by-id/dm-uuid-LVM-KJzEK2E93WxvnUIizExaSRHlRkzq9CcsDu2duBlKpOOkwgc7T7bK3LX6UyubFvre
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:26:34 UTC; 56min ago
      Until: Tue 2026-01-27 08:26:34 UTC; 56min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dQocBgq\x2d0ak6\x2d60nI\x2d8FzY\x2dj7pI\x2dagHP\x2dzdx3c6.device - /dev/disk/by-id/lvm-pv-uuid-QocBgq-0ak6-60nI-8FzY-j7pI-agHP-zdx3c6
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:26:34 UTC; 56min ago
      Until: Tue 2026-01-27 08:26:34 UTC; 56min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2dlabel-config\x2d2.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpartuuid-97d3b354\x2d01.device - /dev/disk/by-partuuid/97d3b354-01
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0.device - /dev/disk/by-path/pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0.device - /dev/disk/by-path/virtio-pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/virtio-pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2duuid-2026\x2d01\x2d27\x2d07\x2d46\x2d48\x2d00.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2duuid-22ac9141\x2d3960\x2d4912\x2db20e\x2d19fc8a328d40.device - /dev/disk/by-uuid/22ac9141-3960-4912-b20e-19fc8a328d40
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

Jan 27 07:47:04 localhost systemd[1]: Found device /dev/disk/by-uuid/22ac9141-3960-4912-b20e-19fc8a328d40.

● dev-dm\x2d0.device - /dev/dm-0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:26:34 UTC; 56min ago
      Until: Tue 2026-01-27 08:26:34 UTC; 56min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-loop3.device - /dev/loop3
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:26:34 UTC; 56min ago
      Until: Tue 2026-01-27 08:26:34 UTC; 56min ago
     Device: /sys/devices/virtual/block/loop3

● dev-mapper-ceph_vg0\x2dceph_lv0.device - /dev/mapper/ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:26:34 UTC; 56min ago
      Until: Tue 2026-01-27 08:26:34 UTC; 56min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-rfkill.device - /dev/rfkill
    Follows: unit currently follows state of sys-devices-virtual-misc-rfkill.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:09 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:09 UTC; 1h 35min ago
     Device: /sys/devices/virtual/misc/rfkill

● dev-sr0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ttyS0.device - /dev/ttyS0
    Follows: unit currently follows state of sys-devices-pnp0-00:00-tty-ttyS0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

Jan 27 07:47:07 localhost systemd[1]: Condition check resulted in /dev/ttyS0 being skipped.

● dev-ttyS1.device - /dev/ttyS1
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● dev-ttyS2.device - /dev/ttyS2
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS2.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● dev-ttyS3.device - /dev/ttyS3
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS3.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● dev-vda.device - /dev/vda
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-vda1.device - /dev/vda1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device - QEMU_DVD-ROM config-2
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● sys-devices-pci0000:00-0000:00:03.0-virtio1-net-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● sys-devices-pci0000:00-0000:00:07.0-virtio5-net-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:49:48 UTC; 1h 33min ago
      Until: Tue 2026-01-27 07:49:48 UTC; 1h 33min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-devices-platform-serial8250-tty-ttyS1.device - /sys/devices/platform/serial8250/tty/ttyS1
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● sys-devices-platform-serial8250-tty-ttyS2.device - /sys/devices/platform/serial8250/tty/ttyS2
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● sys-devices-platform-serial8250-tty-ttyS3.device - /sys/devices/platform/serial8250/tty/ttyS3
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● sys-devices-pnp0-00:00-tty-ttyS0.device - /sys/devices/pnp0/00:00/tty/ttyS0
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

● sys-devices-virtual-block-dm\x2d0.device - /sys/devices/virtual/block/dm-0
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:26:34 UTC; 56min ago
      Until: Tue 2026-01-27 08:26:34 UTC; 56min ago
     Device: /sys/devices/virtual/block/dm-0

● sys-devices-virtual-block-loop3.device - /sys/devices/virtual/block/loop3
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:26:34 UTC; 56min ago
      Until: Tue 2026-01-27 08:26:34 UTC; 56min ago
     Device: /sys/devices/virtual/block/loop3

● sys-devices-virtual-misc-rfkill.device - /sys/devices/virtual/misc/rfkill
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:09 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:09 UTC; 1h 35min ago
     Device: /sys/devices/virtual/misc/rfkill

● sys-devices-virtual-net-br\x2dex.device - /sys/devices/virtual/net/br-ex
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:23:09 UTC; 59min ago
      Until: Tue 2026-01-27 08:23:09 UTC; 59min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-devices-virtual-net-br\x2dint.device - /sys/devices/virtual/net/br-int
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:39:38 UTC; 43min ago
      Until: Tue 2026-01-27 08:39:38 UTC; 43min ago
     Device: /sys/devices/virtual/net/br-int

● sys-devices-virtual-net-genev_sys_6081.device - /sys/devices/virtual/net/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:39:38 UTC; 43min ago
      Until: Tue 2026-01-27 08:39:38 UTC; 43min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-devices-virtual-net-ovs\x2dsystem.device - /sys/devices/virtual/net/ovs-system
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:23:09 UTC; 59min ago
      Until: Tue 2026-01-27 08:23:09 UTC; 59min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-devices-virtual-net-vlan20.device - /sys/devices/virtual/net/vlan20
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:23:09 UTC; 59min ago
      Until: Tue 2026-01-27 08:23:09 UTC; 59min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-devices-virtual-net-vlan21.device - /sys/devices/virtual/net/vlan21
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:23:09 UTC; 59min ago
      Until: Tue 2026-01-27 08:23:09 UTC; 59min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-devices-virtual-net-vlan22.device - /sys/devices/virtual/net/vlan22
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:23:09 UTC; 59min ago
      Until: Tue 2026-01-27 08:23:09 UTC; 59min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-devices-virtual-net-vlan23.device - /sys/devices/virtual/net/vlan23
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:23:09 UTC; 59min ago
      Until: Tue 2026-01-27 08:23:09 UTC; 59min ago
     Device: /sys/devices/virtual/net/vlan23

● sys-module-configfs.device - /sys/module/configfs
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/module/configfs

● sys-module-fuse.device - /sys/module/fuse
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/module/fuse

● sys-subsystem-net-devices-br\x2dex.device - /sys/subsystem/net/devices/br-ex
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:23:09 UTC; 59min ago
Unit boot.mount could not be found.
      Until: Tue 2026-01-27 08:23:09 UTC; 59min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-subsystem-net-devices-br\x2dint.device - /sys/subsystem/net/devices/br-int
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:39:38 UTC; 43min ago
      Until: Tue 2026-01-27 08:39:38 UTC; 43min ago
     Device: /sys/devices/virtual/net/br-int

● sys-subsystem-net-devices-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-subsystem-net-devices-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 07:49:48 UTC; 1h 33min ago
      Until: Tue 2026-01-27 07:49:48 UTC; 1h 33min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-subsystem-net-devices-genev_sys_6081.device - /sys/subsystem/net/devices/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:39:38 UTC; 43min ago
      Until: Tue 2026-01-27 08:39:38 UTC; 43min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-subsystem-net-devices-ovs\x2dsystem.device - /sys/subsystem/net/devices/ovs-system
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:23:09 UTC; 59min ago
      Until: Tue 2026-01-27 08:23:09 UTC; 59min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-subsystem-net-devices-vlan20.device - /sys/subsystem/net/devices/vlan20
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:23:09 UTC; 59min ago
      Until: Tue 2026-01-27 08:23:09 UTC; 59min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-subsystem-net-devices-vlan21.device - /sys/subsystem/net/devices/vlan21
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:23:09 UTC; 59min ago
      Until: Tue 2026-01-27 08:23:09 UTC; 59min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-subsystem-net-devices-vlan22.device - /sys/subsystem/net/devices/vlan22
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:23:09 UTC; 59min ago
      Until: Tue 2026-01-27 08:23:09 UTC; 59min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-subsystem-net-devices-vlan23.device - /sys/subsystem/net/devices/vlan23
     Loaded: loaded
     Active: active (plugged) since Tue 2026-01-27 08:23:09 UTC; 59min ago
      Until: Tue 2026-01-27 08:23:09 UTC; 59min ago
     Device: /sys/devices/virtual/net/vlan23

● -.mount - Root Mount
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted)
      Where: /
       What: /dev/vda1
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages.mount - Huge Pages File System
     Loaded: loaded (/usr/lib/systemd/system/dev-hugepages.mount; static)
     Active: active (mounted) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Where: /dev/hugepages
       What: hugetlbfs
       Docs: https://docs.kernel.org/admin-guide/mm/hugetlbpage.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 52.0K (peak: 556.0K)
        CPU: 7ms
     CGroup: /dev-hugepages.mount

● dev-hugepages1G.mount - /dev/hugepages1G
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Tue 2026-01-27 08:25:31 UTC; 57min ago
      Until: Tue 2026-01-27 08:25:31 UTC; 57min ago
      Where: /dev/hugepages1G
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages2M.mount - /dev/hugepages2M
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Tue 2026-01-27 08:25:32 UTC; 57min ago
      Until: Tue 2026-01-27 08:25:32 UTC; 57min ago
      Where: /dev/hugepages2M
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)
Unit home.mount could not be found.

● dev-mqueue.mount - POSIX Message Queue File System
     Loaded: loaded (/usr/lib/systemd/system/dev-mqueue.mount; static)
     Active: active (mounted) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Where: /dev/mqueue
       What: mqueue
       Docs: man:mq_overview(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 6ms
     CGroup: /dev-mqueue.mount

Jan 27 07:47:06 localhost systemd[1]: Mounted POSIX Message Queue File System.

○ proc-fs-nfsd.mount - NFSD configuration filesystem
     Loaded: loaded (/usr/lib/systemd/system/proc-fs-nfsd.mount; static)
     Active: inactive (dead)
      Where: /proc/fs/nfsd
       What: nfsd

● proc-sys-fs-binfmt_misc.mount - Arbitrary Executable File Formats File System
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.mount; disabled; preset: disabled)
     Active: active (mounted) since Tue 2026-01-27 08:28:52 UTC; 54min ago
      Until: Tue 2026-01-27 08:28:52 UTC; 54min ago
TriggeredBy: ● proc-sys-fs-binfmt_misc.automount
      Where: /proc/sys/fs/binfmt_misc
       What: binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 8.0K (peak: 540.0K)
        CPU: 8ms
     CGroup: /proc-sys-fs-binfmt_misc.mount

Jan 27 08:28:52 compute-1 systemd[1]: Mounting Arbitrary Executable File Formats File System...
Jan 27 08:28:52 compute-1 systemd[1]: Mounted Arbitrary Executable File Formats File System.

● run-credentials-systemd\x2dsysctl.service.mount - /run/credentials/systemd-sysctl.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2026-01-27 08:21:49 UTC; 1h 1min ago
      Until: Tue 2026-01-27 08:21:49 UTC; 1h 1min ago
      Where: /run/credentials/systemd-sysctl.service
       What: none

● run-credentials-systemd\x2dsysusers.service.mount - /run/credentials/systemd-sysusers.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Where: /run/credentials/systemd-sysusers.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup.service.mount - /run/credentials/systemd-tmpfiles-setup.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Where: /run/credentials/systemd-tmpfiles-setup.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup\x2ddev.service.mount - /run/credentials/systemd-tmpfiles-setup-dev.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Where: /run/credentials/systemd-tmpfiles-setup-dev.service
       What: none

● run-netns.mount - /run/netns
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2026-01-27 08:24:53 UTC; 58min ago
      Until: Tue 2026-01-27 08:24:53 UTC; 58min ago
      Where: /run/netns
       What: tmpfs

● run-user-1000.mount - /run/user/1000
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2026-01-27 07:47:29 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:29 UTC; 1h 35min ago
      Where: /run/user/1000
       What: tmpfs

● run-user-42477.mount - /run/user/42477
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2026-01-27 08:28:46 UTC; 54min ago
      Until: Tue 2026-01-27 08:28:46 UTC; 54min ago
      Where: /run/user/42477
       What: tmpfs

● sys-fs-fuse-connections.mount - FUSE Control File System
     Loaded: loaded (/usr/lib/systemd/system/sys-fs-fuse-connections.mount; static)
Unit sysroot.mount could not be found.
     Active: active (mounted) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Where: /sys/fs/fuse/connections
       What: fusectl
       Docs: https://docs.kernel.org/filesystems/fuse.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 2ms
     CGroup: /sys-fs-fuse-connections.mount

Jan 27 07:47:06 localhost systemd[1]: Mounting FUSE Control File System...
Jan 27 07:47:06 localhost systemd[1]: Mounted FUSE Control File System.

● sys-kernel-config.mount - Kernel Configuration File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Where: /sys/kernel/config
       What: configfs
       Docs: https://docs.kernel.org/filesystems/configfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● sys-kernel-debug-tracing.mount - /sys/kernel/debug/tracing
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2026-01-27 09:22:23 UTC; 35s ago
      Until: Tue 2026-01-27 09:22:23 UTC; 35s ago
      Where: /sys/kernel/debug/tracing
       What: tracefs

● sys-kernel-debug.mount - Kernel Debug File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-debug.mount; static)
     Active: active (mounted) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Where: /sys/kernel/debug
       What: debugfs
       Docs: https://docs.kernel.org/filesystems/debugfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 5ms
     CGroup: /sys-kernel-debug.mount

Jan 27 07:47:06 localhost systemd[1]: Mounted Kernel Debug File System.

● sys-kernel-tracing.mount - Kernel Trace File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-tracing.mount; static)
     Active: active (mounted) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Where: /sys/kernel/tracing
       What: tracefs
       Docs: https://docs.kernel.org/trace/ftrace.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 5ms
     CGroup: /sys-kernel-tracing.mount

Jan 27 07:47:06 localhost systemd[1]: Mounted Kernel Trace File System.

○ tmp.mount - Temporary Directory /tmp
     Loaded: loaded (/usr/lib/systemd/system/tmp.mount; disabled; preset: disabled)
     Active: inactive (dead)
      Where: /tmp
       What: tmpfs
       Docs: https://systemd.io/TEMPORARY_DIRECTORIES
             man:file-hierarchy(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● var-lib-containers-storage-overlay-1fb32fb448bbfd7a36a265a2a2e9fbd325bca290b9630cf18fae795e85ef2e8d-merged.mount - /var/lib/containers/storage/overlay/1fb32fb448bbfd7a36a265a2a2e9fbd325bca290b9630cf18fae795e85ef2e8d/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2026-01-27 08:29:58 UTC; 52min ago
      Until: Tue 2026-01-27 08:29:58 UTC; 52min ago
      Where: /var/lib/containers/storage/overlay/1fb32fb448bbfd7a36a265a2a2e9fbd325bca290b9630cf18fae795e85ef2e8d/merged
       What: overlay

● var-lib-containers-storage-overlay-29bd55383cfc53c1739f6b426b77c29f20683f2718fc66b01113a68a408ac852-merged.mount - /var/lib/containers/storage/overlay/29bd55383cfc53c1739f6b426b77c29f20683f2718fc66b01113a68a408ac852/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2026-01-27 08:40:52 UTC; 42min ago
      Until: Tue 2026-01-27 08:40:52 UTC; 42min ago
      Where: /var/lib/containers/storage/overlay/29bd55383cfc53c1739f6b426b77c29f20683f2718fc66b01113a68a408ac852/merged
       What: overlay

● var-lib-containers-storage-overlay-4c09b912a4a62f15319c2606b729a002d269ff157a88adea194bdeeaba70f56d-merged.mount - /var/lib/containers/storage/overlay/4c09b912a4a62f15319c2606b729a002d269ff157a88adea194bdeeaba70f56d/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2026-01-27 08:29:26 UTC; 53min ago
      Until: Tue 2026-01-27 08:29:26 UTC; 53min ago
      Where: /var/lib/containers/storage/overlay/4c09b912a4a62f15319c2606b729a002d269ff157a88adea194bdeeaba70f56d/merged
       What: overlay

● var-lib-containers-storage-overlay-565f2df30fbbad6c80462c7285e9bd471fc7a22b5c2f458f97b1caa583a3144a-merged.mount - /var/lib/containers/storage/overlay/565f2df30fbbad6c80462c7285e9bd471fc7a22b5c2f458f97b1caa583a3144a/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2026-01-27 08:29:15 UTC; 53min ago
      Until: Tue 2026-01-27 08:29:15 UTC; 53min ago
      Where: /var/lib/containers/storage/overlay/565f2df30fbbad6c80462c7285e9bd471fc7a22b5c2f458f97b1caa583a3144a/merged
       What: overlay

● var-lib-containers-storage-overlay-73c98e77dc1a172d283a0235d653aa8fe194a06c3ef0108c7147f14cc0cb3ed9-merged.mount - /var/lib/containers/storage/overlay/73c98e77dc1a172d283a0235d653aa8fe194a06c3ef0108c7147f14cc0cb3ed9/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2026-01-27 08:39:37 UTC; 43min ago
      Until: Tue 2026-01-27 08:39:37 UTC; 43min ago
      Where: /var/lib/containers/storage/overlay/73c98e77dc1a172d283a0235d653aa8fe194a06c3ef0108c7147f14cc0cb3ed9/merged
       What: overlay

● var-lib-containers-storage-overlay-a0e89404f4e0c106337d51d6c081752ca156cc4046f8a0ea17e55665a10cc1d4-merged.mount - /var/lib/containers/storage/overlay/a0e89404f4e0c106337d51d6c081752ca156cc4046f8a0ea17e55665a10cc1d4/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2026-01-27 08:30:50 UTC; 52min ago
      Until: Tue 2026-01-27 08:30:50 UTC; 52min ago
      Where: /var/lib/containers/storage/overlay/a0e89404f4e0c106337d51d6c081752ca156cc4046f8a0ea17e55665a10cc1d4/merged
       What: overlay

● var-lib-containers-storage-overlay-c04242aadde718793b6f05220a0001b8816d37a78eb3876c1644594f6bfce389-merged.mount - /var/lib/containers/storage/overlay/c04242aadde718793b6f05220a0001b8816d37a78eb3876c1644594f6bfce389/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2026-01-27 08:31:26 UTC; 51min ago
      Until: Tue 2026-01-27 08:31:26 UTC; 51min ago
      Where: /var/lib/containers/storage/overlay/c04242aadde718793b6f05220a0001b8816d37a78eb3876c1644594f6bfce389/merged
       What: overlay

● var-lib-containers-storage-overlay-d672c7f89b4093a6697fec404b13ff2bebebdb344168fe3b70f2d9402fedfe1a-merged.mount - /var/lib/containers/storage/overlay/d672c7f89b4093a6697fec404b13ff2bebebdb344168fe3b70f2d9402fedfe1a/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2026-01-27 08:48:54 UTC; 34min ago
      Until: Tue 2026-01-27 08:48:54 UTC; 34min ago
      Where: /var/lib/containers/storage/overlay/d672c7f89b4093a6697fec404b13ff2bebebdb344168fe3b70f2d9402fedfe1a/merged
       What: overlay

● var-lib-containers-storage-overlay-fbdcce181b9d2e41f089c1c79285b235c4c31ed9063c951cb595fd0e78af4313-merged.mount - /var/lib/containers/storage/overlay/fbdcce181b9d2e41f089c1c79285b235c4c31ed9063c951cb595fd0e78af4313/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2026-01-27 08:30:09 UTC; 52min ago
      Until: Tue 2026-01-27 08:30:09 UTC; 52min ago
      Where: /var/lib/containers/storage/overlay/fbdcce181b9d2e41f089c1c79285b235c4c31ed9063c951cb595fd0e78af4313/merged
       What: overlay

● var-lib-containers-storage-overlay.mount - /var/lib/containers/storage/overlay
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2026-01-27 08:29:15 UTC; 53min ago
      Until: Tue 2026-01-27 08:29:15 UTC; 53min ago
      Where: /var/lib/containers/storage/overlay
       What: /dev/vda1

● var-lib-containers-storage-overlay\x2dcontainers-a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2026-01-27 08:48:54 UTC; 34min ago
      Until: Tue 2026-01-27 08:48:54 UTC; 34min ago
      Where: /var/lib/containers/storage/overlay-containers/a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2026-01-27 08:40:52 UTC; 42min ago
      Until: Tue 2026-01-27 08:40:52 UTC; 42min ago
      Where: /var/lib/containers/storage/overlay-containers/d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2026-01-27 08:39:37 UTC; 43min ago
      Until: Tue 2026-01-27 08:39:37 UTC; 43min ago
      Where: /var/lib/containers/storage/overlay-containers/f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d/userdata/shm
       What: shm

○ var-lib-machines.mount - Virtual Machine and Container Storage (Compatibility)
     Loaded: loaded (/usr/lib/systemd/system/var-lib-machines.mount; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2026-01-27 08:45:16 UTC; 37min ago
      Where: /var/lib/machines
       What: /var/lib/machines.raw

Jan 27 08:45:16 compute-1 systemd[1]: Virtual Machine and Container Storage (Compatibility) was skipped because of an unmet condition check (ConditionPathExists=/var/lib/machines.raw).

● var-lib-nfs-rpc_pipefs.mount - RPC Pipe File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Where: /var/lib/nfs/rpc_pipefs
       What: rpc_pipefs

● systemd-ask-password-console.path - Dispatch Password Requests to Console Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.path; static)
     Active: active (waiting) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
   Triggers: ● systemd-ask-password-console.service
       Docs: man:systemd-ask-password-console.path(8)

● systemd-ask-password-wall.path - Forward Password Requests to Wall Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.path; static)
     Active: active (waiting) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
   Triggers: ● systemd-ask-password-wall.service
       Docs: man:systemd-ask-password-wall.path(8)

● init.scope - System and Service Manager
     Loaded: loaded
  Transient: yes
     Active: active (running) since Tue 2026-01-27 07:47:02 UTC; 1h 35min ago
       Docs: man:systemd(1)
         IO: 752.0K read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 46.1M (peak: 64.5M)
        CPU: 52.686s
     CGroup: /init.scope
             └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31

Jan 27 09:00:30 compute-1 systemd[1]: libpod-conmon-60b97cc0f104261d7b36c795f8b389097a5888722aeb2c4e6cfaeaadad972630.scope: Deactivated successfully.
Jan 27 09:00:30 compute-1 systemd[1]: Started libpod-conmon-fbdd64e299b5ee7c3cca531c13e389352e17631c395cb3dca19318db592fdedb.scope.
Jan 27 09:00:30 compute-1 systemd[1]: Started libcrun container.
Jan 27 09:00:31 compute-1 systemd[1]: libpod-fbdd64e299b5ee7c3cca531c13e389352e17631c395cb3dca19318db592fdedb.scope: Deactivated successfully.
Jan 27 09:00:31 compute-1 systemd[1]: libpod-fbdd64e299b5ee7c3cca531c13e389352e17631c395cb3dca19318db592fdedb.scope: Consumed 1.259s CPU time.
Jan 27 09:00:31 compute-1 systemd[1]: var-lib-containers-storage-overlay-db303d38e7db45d217c7cd02e5f50b21b9105fd4d1266d8bbd9ff38d65b4522b-merged.mount: Deactivated successfully.
Jan 27 09:00:31 compute-1 systemd[1]: libpod-conmon-fbdd64e299b5ee7c3cca531c13e389352e17631c395cb3dca19318db592fdedb.scope: Deactivated successfully.
Jan 27 09:22:16 compute-1 systemd[1]: Started Session 52 of User zuul.
Jan 27 09:22:37 compute-1 systemd[1]: Starting Hostname Service...
Jan 27 09:22:37 compute-1 systemd[1]: Started Hostname Service.

● libpod-a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2026-01-27 08:48:54 UTC; 34min ago
         IO: 452.0K read, 40.0K written
      Tasks: 23 (limit: 4096)
     Memory: 137.7M (peak: 167.3M)
        CPU: 42.332s
     CGroup: /machine.slice/libpod-a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a.scope
             └─container
               ├─221468 dumb-init --single-child -- kolla_start
               └─221470 /usr/bin/python3 /usr/bin/nova-compute

Jan 27 08:48:54 compute-1 systemd[1]: Started libcrun container.

● libpod-d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1.scope.d
             └─dep.conf
     Active: active (running) since Tue 2026-01-27 08:40:52 UTC; 42min ago
         IO: 1.4M read, 220.0K written
      Tasks: 5 (limit: 4096)
     Memory: 222.5M (peak: 223.3M)
        CPU: 16.673s
     CGroup: /machine.slice/libpod-d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1.scope
             └─container
               ├─139975 dumb-init --single-child -- kolla_start
               ├─139978 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─140319 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               └─140418 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpm4k7fsyh/privsep.sock

Jan 27 08:40:52 compute-1 systemd[1]: Started libcrun container.

● libpod-f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d.scope.d
             └─dep.conf
     Active: active (running) since Tue 2026-01-27 08:39:37 UTC; 43min ago
         IO: 396.0K read, 4.0K written
      Tasks: 6 (limit: 4096)
     Memory: 8.7M (peak: 11.2M)
        CPU: 7.622s
     CGroup: /machine.slice/libpod-f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d.scope
             └─container
               ├─130303 dumb-init --single-child -- kolla_start
               └─130306 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt

Jan 27 08:39:37 compute-1 systemd[1]: Started libcrun container.

● session-1.scope - Session 1 of User zuul
     Loaded: loaded (/run/systemd/transient/session-1.scope; transient)
  Transient: yes
     Active: active (abandoned) since Tue 2026-01-27 07:47:30 UTC; 1h 35min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 17.1M (peak: 38.8M)
        CPU: 1min 14.061s
     CGroup: /user.slice/user-1000.slice/session-1.scope
             └─4519 /usr/bin/python3

Jan 27 07:48:17 np0005597077.novalocal sudo[6864]: pam_unix(sudo:session): session closed for user root
Jan 27 07:48:18 np0005597077.novalocal python3[6894]: ansible-ansible.legacy.command Invoked with executable=/bin/bash _raw_params=env
                                                       _uses_shell=True zuul_log_id=fa163ef9-e89a-c639-f9a2-000000000020-1-compute1 zuul_ansible_split_streams=False warn=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None creates=None removes=None stdin=None
Jan 27 07:48:19 np0005597077.novalocal python3[6922]: ansible-file Invoked with path=/home/zuul/workspace state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Jan 27 07:48:38 np0005597077.novalocal sudo[6946]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-shrtexeqvscssaewpsighiaffuarmfct ; /usr/bin/python3'
Jan 27 07:48:38 np0005597077.novalocal sudo[6946]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Jan 27 07:48:39 np0005597077.novalocal python3[6948]: ansible-ansible.builtin.file Invoked with path=/etc/ci/env state=directory mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Jan 27 07:48:39 np0005597077.novalocal sudo[6946]: pam_unix(sudo:session): session closed for user root
Jan 27 07:49:39 np0005597077.novalocal sshd-session[4316]: Received disconnect from 38.102.83.114 port 54002:11: disconnected by user
Jan 27 07:49:39 np0005597077.novalocal sshd-session[4316]: Disconnected from user zuul 38.102.83.114 port 54002
Jan 27 07:49:39 np0005597077.novalocal sshd-session[4303]: pam_unix(sshd:session): session closed for user zuul

● session-21.scope - Session 21 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-21.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2026-01-27 08:28:46 UTC; 54min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 1.3M)
        CPU: 97ms
     CGroup: /user.slice/user-42477.slice/session-21.scope
             ├─72547 "sshd-session: ceph-admin [priv]"
             └─72571 "sshd-session: ceph-admin"

Jan 27 08:28:46 compute-1 systemd[1]: Started Session 21 of User ceph-admin.

● session-23.scope - Session 23 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-23.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2026-01-27 08:28:46 UTC; 54min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.3M)
        CPU: 216ms
     CGroup: /user.slice/user-42477.slice/session-23.scope
             ├─72565 "sshd-session: ceph-admin [priv]"
             └─72572 "sshd-session: ceph-admin@notty"

Jan 27 08:28:46 compute-1 systemd[1]: Started Session 23 of User ceph-admin.
Jan 27 08:28:46 compute-1 sudo[72573]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 27 08:28:46 compute-1 sudo[72573]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 27 08:28:46 compute-1 sudo[72573]: pam_unix(sudo:session): session closed for user root
Jan 27 08:28:46 compute-1 sudo[72598]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Jan 27 08:28:46 compute-1 sudo[72598]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 27 08:28:46 compute-1 sudo[72598]: pam_unix(sudo:session): session closed for user root

● session-24.scope - Session 24 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-24.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2026-01-27 08:28:46 UTC; 54min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 5.9M)
        CPU: 214ms
     CGroup: /user.slice/user-42477.slice/session-24.scope
             ├─72623 "sshd-session: ceph-admin [priv]"
             └─72626 "sshd-session: ceph-admin@notty"

Jan 27 08:28:46 compute-1 systemd[1]: Started Session 24 of User ceph-admin.
Jan 27 08:28:46 compute-1 sudo[72627]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 27 08:28:46 compute-1 sudo[72627]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 27 08:28:46 compute-1 sudo[72627]: pam_unix(sudo:session): session closed for user root
Jan 27 08:28:46 compute-1 sudo[72652]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/281e9bde-2795-59f4-98ac-90cf5b49a2de/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 check-host --expect-hostname compute-1
Jan 27 08:28:47 compute-1 sudo[72652]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 27 08:28:47 compute-1 sudo[72652]: pam_unix(sudo:session): session closed for user root

● session-25.scope - Session 25 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-25.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2026-01-27 08:28:47 UTC; 54min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.1M)
        CPU: 190ms
     CGroup: /user.slice/user-42477.slice/session-25.scope
             ├─72677 "sshd-session: ceph-admin [priv]"
             └─72680 "sshd-session: ceph-admin@notty"

Jan 27 08:28:47 compute-1 systemd[1]: Started Session 25 of User ceph-admin.
Jan 27 08:28:47 compute-1 sudo[72681]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 27 08:28:47 compute-1 sudo[72681]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 27 08:28:47 compute-1 sudo[72681]: pam_unix(sudo:session): session closed for user root
Jan 27 08:28:47 compute-1 sudo[72706]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /var/lib/ceph/281e9bde-2795-59f4-98ac-90cf5b49a2de/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d
Jan 27 08:28:47 compute-1 sudo[72706]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 27 08:28:47 compute-1 sudo[72706]: pam_unix(sudo:session): session closed for user root

● session-26.scope - Session 26 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-26.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2026-01-27 08:28:47 UTC; 54min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.3M)
        CPU: 237ms
     CGroup: /user.slice/user-42477.slice/session-26.scope
             ├─72731 "sshd-session: ceph-admin [priv]"
             └─72734 "sshd-session: ceph-admin@notty"

Jan 27 08:28:47 compute-1 systemd[1]: Started Session 26 of User ceph-admin.
Jan 27 08:28:47 compute-1 sudo[72735]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 27 08:28:47 compute-1 sudo[72735]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 27 08:28:47 compute-1 sudo[72735]: pam_unix(sudo:session): session closed for user root
Jan 27 08:28:47 compute-1 sudo[72760]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /var/lib/ceph/281e9bde-2795-59f4-98ac-90cf5b49a2de
Jan 27 08:28:47 compute-1 sudo[72760]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 27 08:28:47 compute-1 sudo[72760]: pam_unix(sudo:session): session closed for user root

● session-27.scope - Session 27 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-27.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2026-01-27 08:28:48 UTC; 54min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.3M)
        CPU: 204ms
     CGroup: /user.slice/user-42477.slice/session-27.scope
             ├─72785 "sshd-session: ceph-admin [priv]"
             └─72788 "sshd-session: ceph-admin@notty"

Jan 27 08:28:48 compute-1 systemd[1]: Started Session 27 of User ceph-admin.
Jan 27 08:28:48 compute-1 sudo[72789]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 27 08:28:48 compute-1 sudo[72789]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 27 08:28:48 compute-1 sudo[72789]: pam_unix(sudo:session): session closed for user root
Jan 27 08:28:48 compute-1 sudo[72814]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /tmp/cephadm-281e9bde-2795-59f4-98ac-90cf5b49a2de/var/lib/ceph/281e9bde-2795-59f4-98ac-90cf5b49a2de
Jan 27 08:28:48 compute-1 sudo[72814]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 27 08:28:48 compute-1 sudo[72814]: pam_unix(sudo:session): session closed for user root

● session-28.scope - Session 28 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-28.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2026-01-27 08:28:48 UTC; 54min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.4M)
        CPU: 190ms
     CGroup: /user.slice/user-42477.slice/session-28.scope
             ├─72839 "sshd-session: ceph-admin [priv]"
             └─72842 "sshd-session: ceph-admin@notty"

Jan 27 08:28:48 compute-1 systemd[1]: Started Session 28 of User ceph-admin.
Jan 27 08:28:48 compute-1 sudo[72843]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 27 08:28:48 compute-1 sudo[72843]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 27 08:28:48 compute-1 sudo[72843]: pam_unix(sudo:session): session closed for user root
Jan 27 08:28:48 compute-1 sudo[72868]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/touch /tmp/cephadm-281e9bde-2795-59f4-98ac-90cf5b49a2de/var/lib/ceph/281e9bde-2795-59f4-98ac-90cf5b49a2de/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new
Jan 27 08:28:48 compute-1 sudo[72868]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 27 08:28:48 compute-1 sudo[72868]: pam_unix(sudo:session): session closed for user root

● session-29.scope - Session 29 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-29.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2026-01-27 08:28:48 UTC; 54min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.3M)
        CPU: 179ms
     CGroup: /user.slice/user-42477.slice/session-29.scope
             ├─72893 "sshd-session: ceph-admin [priv]"
             └─72896 "sshd-session: ceph-admin@notty"

Jan 27 08:28:48 compute-1 systemd[1]: Started Session 29 of User ceph-admin.
Jan 27 08:28:48 compute-1 sudo[72897]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 27 08:28:48 compute-1 sudo[72897]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 27 08:28:48 compute-1 sudo[72897]: pam_unix(sudo:session): session closed for user root
Jan 27 08:28:48 compute-1 sudo[72922]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chown -R ceph-admin /tmp/cephadm-281e9bde-2795-59f4-98ac-90cf5b49a2de
Jan 27 08:28:48 compute-1 sudo[72922]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 27 08:28:48 compute-1 sudo[72922]: pam_unix(sudo:session): session closed for user root

● session-30.scope - Session 30 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-30.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2026-01-27 08:28:49 UTC; 54min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.7M)
        CPU: 213ms
     CGroup: /user.slice/user-42477.slice/session-30.scope
             ├─72947 "sshd-session: ceph-admin [priv]"
             └─72950 "sshd-session: ceph-admin@notty"

Jan 27 08:28:49 compute-1 systemd[1]: Started Session 30 of User ceph-admin.
Jan 27 08:28:49 compute-1 sudo[72951]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 27 08:28:49 compute-1 sudo[72951]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 27 08:28:49 compute-1 sudo[72951]: pam_unix(sudo:session): session closed for user root
Jan 27 08:28:49 compute-1 sudo[72976]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chmod 644 /tmp/cephadm-281e9bde-2795-59f4-98ac-90cf5b49a2de/var/lib/ceph/281e9bde-2795-59f4-98ac-90cf5b49a2de/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new
Jan 27 08:28:49 compute-1 sudo[72976]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 27 08:28:49 compute-1 sudo[72976]: pam_unix(sudo:session): session closed for user root

● session-31.scope - Session 31 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-31.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2026-01-27 08:28:49 UTC; 54min ago
         IO: 0B read, 216.0K written
      Tasks: 2
     Memory: 1.4M (peak: 3.5M)
        CPU: 135ms
     CGroup: /user.slice/user-42477.slice/session-31.scope
             ├─73001 "sshd-session: ceph-admin [priv]"
             └─73004 "sshd-session: ceph-admin@notty"

Jan 27 08:28:49 compute-1 systemd[1]: Started Session 31 of User ceph-admin.

● session-32.scope - Session 32 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-32.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2026-01-27 08:28:50 UTC; 54min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.6M)
        CPU: 216ms
     CGroup: /user.slice/user-42477.slice/session-32.scope
             ├─73028 "sshd-session: ceph-admin [priv]"
             └─73031 "sshd-session: ceph-admin@notty"

Jan 27 08:28:50 compute-1 systemd[1]: Started Session 32 of User ceph-admin.
Jan 27 08:28:50 compute-1 sudo[73032]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 27 08:28:50 compute-1 sudo[73032]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 27 08:28:50 compute-1 sudo[73032]: pam_unix(sudo:session): session closed for user root
Jan 27 08:28:50 compute-1 sudo[73057]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mv /tmp/cephadm-281e9bde-2795-59f4-98ac-90cf5b49a2de/var/lib/ceph/281e9bde-2795-59f4-98ac-90cf5b49a2de/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new /var/lib/ceph/281e9bde-2795-59f4-98ac-90cf5b49a2de/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d
Jan 27 08:28:50 compute-1 sudo[73057]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 27 08:28:50 compute-1 sudo[73057]: pam_unix(sudo:session): session closed for user root

● session-33.scope - Session 33 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-33.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2026-01-27 08:28:50 UTC; 54min ago
         IO: 184.0K read, 1.4G written
      Tasks: 2
     Memory: 1.2G (peak: 1.7G)
        CPU: 1min 53.593s
     CGroup: /user.slice/user-42477.slice/session-33.scope
Unit apparmor.service could not be found.
Unit apt-daily.service could not be found.
              ├─73082 "sshd-session: ceph-admin [priv]"
             └─73085 "sshd-session: ceph-admin@notty"

Jan 27 09:22:08 compute-1 sudo[234558]: pam_unix(sudo:session): session closed for user root
Jan 27 09:22:08 compute-1 sudo[234583]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/281e9bde-2795-59f4-98ac-90cf5b49a2de/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Jan 27 09:22:08 compute-1 sudo[234583]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 27 09:22:09 compute-1 sudo[234583]: pam_unix(sudo:session): session closed for user root
Jan 27 09:22:17 compute-1 sudo[234710]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 27 09:22:17 compute-1 sudo[234710]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 27 09:22:17 compute-1 sudo[234710]: pam_unix(sudo:session): session closed for user root
Jan 27 09:22:17 compute-1 sudo[234745]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Jan 27 09:22:17 compute-1 sudo[234745]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 27 09:22:17 compute-1 sudo[234745]: pam_unix(sudo:session): session closed for user root

● session-52.scope - Session 52 of User zuul
     Loaded: loaded (/run/systemd/transient/session-52.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2026-01-27 09:22:16 UTC; 42s ago
         IO: 272.2M read, 190.7M written
      Tasks: 18
     Memory: 618.1M (peak: 682.3M)
        CPU: 1min 49.139s
     CGroup: /user.slice/user-1000.slice/session-52.scope
             ├─234639 "sshd-session: zuul [priv]"
             ├─234642 "sshd-session: zuul@notty"
             ├─234643 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             ├─234667 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             ├─240108 timeout 15s turbostat --debug sleep 10
             ├─240545 timeout 300s systemctl status --all
             ├─240546 systemctl status --all
             ├─240557 timeout 300s ceph fs ls --format json-pretty
             └─240558 /usr/bin/python3 -s /usr/bin/ceph fs ls --format json-pretty

Jan 27 09:22:16 compute-1 systemd[1]: Started Session 52 of User zuul.
Jan 27 09:22:16 compute-1 sudo[234643]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Jan 27 09:22:16 compute-1 sudo[234643]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Jan 27 09:22:24 compute-1 ovs-vsctl[235017]: ovs|00001|db_ctl_base|ERR|no key "dpdk-init" in Open_vSwitch record "." column other_config
Jan 27 09:22:48 compute-1 ovs-appctl[238966]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Jan 27 09:22:48 compute-1 ovs-appctl[238971]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Jan 27 09:22:48 compute-1 ovs-appctl[238974]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

● auditd.service - Security Auditing Service
     Loaded: loaded (/usr/lib/systemd/system/auditd.service; enabled; preset: enabled)
     Active: active (running) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:auditd(8)
             https://github.com/linux-audit/audit-documentation
   Main PID: 703 (auditd)
         IO: 0B read, 20.8M written
      Tasks: 4 (limit: 48560)
     Memory: 14.1M (peak: 15.3M)
        CPU: 4.756s
     CGroup: /system.slice/auditd.service
             ├─703 /sbin/auditd
             └─705 /usr/sbin/sedispatch
Unit auto-cpufreq.service could not be found.
Unit autofs.service could not be found.

Jan 27 07:47:06 localhost augenrules[723]: failure 1
Jan 27 07:47:06 localhost augenrules[723]: pid 703
Jan 27 07:47:06 localhost augenrules[723]: rate_limit 0
Jan 27 07:47:06 localhost augenrules[723]: backlog_limit 8192
Jan 27 07:47:06 localhost augenrules[723]: lost 0
Jan 27 07:47:06 localhost augenrules[723]: backlog 0
Jan 27 07:47:06 localhost augenrules[723]: backlog_wait_time 60000
Jan 27 07:47:06 localhost augenrules[723]: backlog_wait_time_actual 0
Jan 27 07:47:06 localhost systemd[1]: Started Security Auditing Service.
Jan 27 08:45:32 compute-1 auditd[703]: Audit daemon rotating log files

○ auth-rpcgss-module.service - Kernel Module supporting RPCSEC_GSS
     Loaded: loaded (/usr/lib/systemd/system/auth-rpcgss-module.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2026-01-27 07:47:06 UTC; 1h 35min ago

○ blk-availability.service - Availability of block devices
     Loaded: loaded (/usr/lib/systemd/system/blk-availability.service; disabled; preset: disabled)
     Active: inactive (dead)

● ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@crash.compute-1.service - Ceph crash.compute-1 for 281e9bde-2795-59f4-98ac-90cf5b49a2de
     Loaded: loaded (/etc/systemd/system/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2026-01-27 08:29:14 UTC; 53min ago
   Main PID: 76980 (conmon)
         IO: 8.0K read, 1.6M written
      Tasks: 3 (limit: 48560)
     Memory: 13.0M (peak: 34.0M)
        CPU: 1.866s
     CGroup: /system.slice/system-ceph\x2d281e9bde\x2d2795\x2d59f4\x2d98ac\x2d90cf5b49a2de.slice/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@crash.compute-1.service
             ├─libpod-payload-8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c
             │ ├─76982 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-1
             │ └─76984 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-1
             └─runtime
               └─76980 /usr/bin/conmon --api-version 1 -c 8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c -u 8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c/userdata -p /run/containers/storage/overlay-containers/8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-crash-compute-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@crash.compute-1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c

Jan 27 08:29:14 compute-1 systemd[1]: Started Ceph crash.compute-1 for 281e9bde-2795-59f4-98ac-90cf5b49a2de.
Jan 27 08:29:15 compute-1 ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-crash-compute-1[76980]: INFO:ceph-crash:pinging cluster to exercise our key
Jan 27 08:29:15 compute-1 ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-crash-compute-1[76980]: 2026-01-27T08:29:15.380+0000 7fce646a3640 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory
Jan 27 08:29:15 compute-1 ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-crash-compute-1[76980]: 2026-01-27T08:29:15.380+0000 7fce646a3640 -1 AuthRegistry(0x7fce5c067440) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx
Jan 27 08:29:15 compute-1 ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-crash-compute-1[76980]: 2026-01-27T08:29:15.381+0000 7fce646a3640 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory
Jan 27 08:29:15 compute-1 ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-crash-compute-1[76980]: 2026-01-27T08:29:15.381+0000 7fce646a3640 -1 AuthRegistry(0x7fce646a2000) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx
Jan 27 08:29:15 compute-1 ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-crash-compute-1[76980]: 2026-01-27T08:29:15.383+0000 7fce62418640 -1 monclient(hunting): handle_auth_bad_method server allowed_methods [2] but i only support [1]
Jan 27 08:29:15 compute-1 ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-crash-compute-1[76980]: 2026-01-27T08:29:15.383+0000 7fce646a3640 -1 monclient: authenticate NOTE: no keyring found; disabled cephx authentication
Jan 27 08:29:15 compute-1 ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-crash-compute-1[76980]: [errno 13] RADOS permission denied (error connecting to the cluster)
Jan 27 08:29:15 compute-1 ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-crash-compute-1[76980]: INFO:ceph-crash:monitoring path /var/lib/ceph/crash, delay 600s

● ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mds.cephfs.compute-1.taxacd.service - Ceph mds.cephfs.compute-1.taxacd for 281e9bde-2795-59f4-98ac-90cf5b49a2de
     Loaded: loaded (/etc/systemd/system/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2026-01-27 08:31:26 UTC; 51min ago
   Main PID: 84855 (conmon)
         IO: 0B read, 188.0K written
      Tasks: 18 (limit: 48560)
     Memory: 26.3M (peak: 26.8M)
        CPU: 1.815s
     CGroup: /system.slice/system-ceph\x2d281e9bde\x2d2795\x2d59f4\x2d98ac\x2d90cf5b49a2de.slice/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mds.cephfs.compute-1.taxacd.service
             ├─libpod-payload-3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211
             │ ├─84857 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-1.taxacd -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─84859 /usr/bin/ceph-mds -n mds.cephfs.compute-1.taxacd -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─84855 /usr/bin/conmon --api-version 1 -c 3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211 -u 3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211/userdata -p /run/containers/storage/overlay-containers/3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-mds-cephfs-compute-1-taxacd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mds.cephfs.compute-1.taxacd.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211

Jan 27 09:22:27 compute-1 ceph-mds[84859]: mds.cephfs.compute-1.taxacd Can't run that command on an inactive MDS!
Jan 27 09:22:27 compute-1 ceph-mds[84859]: mds.cephfs.compute-1.taxacd asok_command: dump_ops_in_flight {prefix=dump_ops_in_flight} (starting...)
Jan 27 09:22:27 compute-1 ceph-mds[84859]: mds.cephfs.compute-1.taxacd Can't run that command on an inactive MDS!
Jan 27 09:22:27 compute-1 ceph-mds[84859]: mds.cephfs.compute-1.taxacd asok_command: get subtrees {prefix=get subtrees} (starting...)
Jan 27 09:22:27 compute-1 ceph-mds[84859]: mds.cephfs.compute-1.taxacd Can't run that command on an inactive MDS!
Jan 27 09:22:27 compute-1 ceph-mds[84859]: mds.cephfs.compute-1.taxacd asok_command: ops {prefix=ops} (starting...)
Jan 27 09:22:27 compute-1 ceph-mds[84859]: mds.cephfs.compute-1.taxacd Can't run that command on an inactive MDS!
Jan 27 09:22:28 compute-1 ceph-mds[84859]: mds.cephfs.compute-1.taxacd asok_command: session ls {prefix=session ls} (starting...)
Jan 27 09:22:28 compute-1 ceph-mds[84859]: mds.cephfs.compute-1.taxacd Can't run that command on an inactive MDS!
Jan 27 09:22:28 compute-1 ceph-mds[84859]: mds.cephfs.compute-1.taxacd asok_command: status {prefix=status} (starting...)

● ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mgr.compute-1.jqbgxp.service - Ceph mgr.compute-1.jqbgxp for 281e9bde-2795-59f4-98ac-90cf5b49a2de
     Loaded: loaded (/etc/systemd/system/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2026-01-27 08:30:09 UTC; 52min ago
   Main PID: 81953 (conmon)
         IO: 0B read, 215.0K written
      Tasks: 23 (limit: 48560)
     Memory: 478.8M (peak: 479.1M)
        CPU: 23.637s
     CGroup: /system.slice/system-ceph\x2d281e9bde\x2d2795\x2d59f4\x2d98ac\x2d90cf5b49a2de.slice/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mgr.compute-1.jqbgxp.service
             ├─libpod-payload-2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d
             │ ├─81955 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-1.jqbgxp -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─81957 /usr/bin/ceph-mgr -n mgr.compute-1.jqbgxp -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─81953 /usr/bin/conmon --api-version 1 -c 2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d -u 2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d/userdata -p /run/containers/storage/overlay-containers/2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-mgr-compute-1-jqbgxp --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mgr.compute-1.jqbgxp.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d

Jan 27 08:30:29 compute-1 ceph-mgr[81957]: mgr[py] Module volumes has missing NOTIFY_TYPES member
Jan 27 08:30:29 compute-1 ceph-mgr[81957]: mgr[py] Loading python module 'zabbix'
Jan 27 08:30:29 compute-1 ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-mgr-compute-1-jqbgxp[81953]: 2026-01-27T08:30:29.356+0000 7fc826f76140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member
Jan 27 08:30:29 compute-1 ceph-mgr[81957]: mgr[py] Module zabbix has missing NOTIFY_TYPES member
Jan 27 08:30:29 compute-1 ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-mgr-compute-1-jqbgxp[81953]: 2026-01-27T08:30:29.600+0000 7fc826f76140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member
Jan 27 08:30:29 compute-1 ceph-mgr[81957]: ms_deliver_dispatch: unhandled message 0x560a97945600 mon_map magic: 0 v1 from mon.2 v2:192.168.122.101:3300/0
Jan 27 08:30:29 compute-1 ceph-mgr[81957]: client.0 ms_handle_reset on v2:192.168.122.100:6800/510010839
Jan 27 08:45:30 compute-1 ceph-mgr[81957]: client.0 ms_handle_reset on v2:192.168.122.100:6800/510010839
Jan 27 09:00:30 compute-1 ceph-mgr[81957]: client.0 ms_handle_reset on v2:192.168.122.100:6800/510010839
Jan 27 09:15:30 compute-1 ceph-mgr[81957]: client.0 ms_handle_reset on v2:192.168.122.100:6800/510010839

● ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mon.compute-1.service - Ceph mon.compute-1 for 281e9bde-2795-59f4-98ac-90cf5b49a2de
     Loaded: loaded (/etc/systemd/system/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2026-01-27 08:29:58 UTC; 52min ago
   Main PID: 81592 (conmon)
         IO: 0B read, 424.6M written
      Tasks: 27 (limit: 48560)
     Memory: 95.1M (peak: 102.6M)
        CPU: 34.152s
     CGroup: /system.slice/system-ceph\x2d281e9bde\x2d2795\x2d59f4\x2d98ac\x2d90cf5b49a2de.slice/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mon.compute-1.service
             ├─libpod-payload-251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd
             │ ├─81594 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─81596 /usr/bin/ceph-mon -n mon.compute-1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             └─runtime
               └─81592 /usr/bin/conmon --api-version 1 -c 251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd -u 251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd/userdata -p /run/containers/storage/overlay-containers/251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-mon-compute-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mon.compute-1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd

Jan 27 09:22:58 compute-1 ceph-mon[81596]: from='client.? 192.168.122.102:0/94017620' entity='client.admin' cmd=[{"prefix": "osd stat"}]: dispatch
Jan 27 09:22:58 compute-1 ceph-mon[81596]: from='client.18534 -' entity='client.admin' cmd=[{"prefix": "osd perf", "target": ["mon-mgr", ""]}]: dispatch
Jan 27 09:22:58 compute-1 ceph-mon[81596]: from='client.18540 -' entity='client.admin' cmd=[{"prefix": "osd pool autoscale-status", "target": ["mon-mgr", ""]}]: dispatch
Jan 27 09:22:58 compute-1 ceph-mon[81596]: pgmap v1807: 305 pgs: 305 active+clean; 41 MiB data, 244 MiB used, 21 GiB / 21 GiB avail
Jan 27 09:22:58 compute-1 ceph-mon[81596]: from='client.? 192.168.122.100:0/998336395' entity='client.admin' cmd=[{"prefix": "osd pool ls", "detail": "detail"}]: dispatch
Jan 27 09:22:58 compute-1 ceph-mon[81596]: from='client.28205 -' entity='client.admin' cmd=[{"prefix": "osd pool autoscale-status", "target": ["mon-mgr", ""]}]: dispatch
Jan 27 09:22:58 compute-1 ceph-mon[81596]: from='client.? 192.168.122.101:0/1196541295' entity='client.admin' cmd=[{"prefix": "osd pool ls", "detail": "detail"}]: dispatch
Jan 27 09:22:58 compute-1 ceph-mon[81596]: from='client.? 192.168.122.101:0/1442867532' entity='client.admin' cmd=[{"prefix": "osd stat"}]: dispatch
Jan 27 09:22:58 compute-1 ceph-mon[81596]: mon.compute-1@2(peon) e3 handle_command mon_command({"prefix": "fs dump", "format": "json-pretty"} v 0) v1
Jan 27 09:22:58 compute-1 ceph-mon[81596]: log_channel(audit) log [DBG] : from='client.? 192.168.122.101:0/202423810' entity='client.admin' cmd=[{"prefix": "fs dump", "format": "json-pretty"}]: dispatch

● ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@osd.1.service - Ceph osd.1 for 281e9bde-2795-59f4-98ac-90cf5b49a2de
     Loaded: loaded (/etc/systemd/system/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2026-01-27 08:29:26 UTC; 53min ago
   Main PID: 78929 (conmon)
         IO: 102.5M read, 1.9G written
      Tasks: 60 (limit: 48560)
     Memory: 530.8M (peak: 576.0M)
        CPU: 29.400s
     CGroup: /system.slice/system-ceph\x2d281e9bde\x2d2795\x2d59f4\x2d98ac\x2d90cf5b49a2de.slice/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@osd.1.service
             ├─libpod-payload-6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef
             │ ├─78931 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─78933 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─78929 /usr/bin/conmon --api-version 1 -c 6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef -u 6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef/userdata -p /run/containers/storage/overlay-containers/6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef

Jan 27 09:22:32 compute-1 ceph-osd[78933]: monclient: _check_auth_tickets
Jan 27 09:22:32 compute-1 ceph-osd[78933]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-01-27T09:22:00.835217+0000)
Jan 27 09:22:32 compute-1 ceph-osd[78933]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.285714
Jan 27 09:22:32 compute-1 ceph-osd[78933]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0555556
Jan 27 09:22:32 compute-1 ceph-osd[78933]: prioritycache tune_memory target: 4294967296 mapped: 109051904 unmapped: 22003712 heap: 131055616 old mem: 2845415833 new mem: 2845415833
Jan 27 09:22:32 compute-1 ceph-osd[78933]: bluestore.MempoolThread(0x560c07a77b60) _resize_shards cache_size: 2845415833 kv_alloc: 1207959552 kv_used: 2144 kv_onode_alloc: 234881024 kv_onode_used: 464 meta_alloc: 1140850688 meta_used: 1315406 data_alloc: 218103808 data_used: 7208960
Jan 27 09:22:32 compute-1 ceph-osd[78933]: monclient: tick
Jan 27 09:22:32 compute-1 ceph-osd[78933]: monclient: _check_auth_tickets
Jan 27 09:22:32 compute-1 ceph-osd[78933]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-01-27T09:22:01.835401+0000)
Jan 27 09:22:32 compute-1 ceph-osd[78933]: do_command 'log dump' '{prefix=log dump}'

● ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@rgw.rgw.compute-1.nigpsg.service - Ceph rgw.rgw.compute-1.nigpsg for 281e9bde-2795-59f4-98ac-90cf5b49a2de
     Loaded: loaded (/etc/systemd/system/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2026-01-27 08:30:50 UTC; 52min ago
   Main PID: 83596 (conmon)
         IO: 0B read, 3.4M written
      Tasks: 605 (limit: 48560)
     Memory: 119.2M (peak: 120.3M)
        CPU: 22.687s
     CGroup: /system.slice/system-ceph\x2d281e9bde\x2d2795\x2d59f4\x2d98ac\x2d90cf5b49a2de.slice/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@rgw.rgw.compute-1.nigpsg.service
             ├─libpod-payload-39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249
             │ ├─83598 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-1.nigpsg -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─83600 /usr/bin/radosgw -n client.rgw.rgw.compute-1.nigpsg -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─83596 /usr/bin/conmon --api-version 1 -c 39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249 -u 39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249/userdata -p /run/containers/storage/overlay-containers/39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-rgw-rgw-compute-1-nigpsg --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@rgw.rgw.compute-1.nigpsg.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249

Jan 27 09:22:55 compute-1 radosgw[83600]: beast: 0x7f73821636f0: 192.168.122.100 - anonymous [27/Jan/2026:09:22:55.160 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.000000000s
Jan 27 09:22:55 compute-1 radosgw[83600]: ====== starting new request req=0x7f73821636f0 =====
Jan 27 09:22:55 compute-1 radosgw[83600]: ====== req done req=0x7f73821636f0 op status=0 http_status=200 latency=0.000000000s ======
Jan 27 09:22:55 compute-1 radosgw[83600]: beast: 0x7f73821636f0: 192.168.122.102 - anonymous [27/Jan/2026:09:22:55.476 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.000000000s
Jan 27 09:22:57 compute-1 radosgw[83600]: ====== starting new request req=0x7f73821636f0 =====
Jan 27 09:22:57 compute-1 radosgw[83600]: ====== req done req=0x7f73821636f0 op status=0 http_status=200 latency=0.000000000s ======
Jan 27 09:22:57 compute-1 radosgw[83600]: beast: 0x7f73821636f0: 192.168.122.100 - anonymous [27/Jan/2026:09:22:57.163 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.000000000s
Jan 27 09:22:57 compute-1 radosgw[83600]: ====== starting new request req=0x7f73821636f0 =====
Jan 27 09:22:57 compute-1 radosgw[83600]: ====== req done req=0x7f73821636f0 op status=0 http_status=200 latency=0.000000000s ======
Jan 27 09:22:57 compute-1 radosgw[83600]: beast: 0x7f73821636f0: 192.168.122.102 - anonymous [27/Jan/2026:09:22:57.478 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.000000000s

● ceph-osd-losetup-0.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-0.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2026-01-27 08:26:37 UTC; 56min ago
   Main PID: 72473 (code=exited, status=0/SUCCESS)
        CPU: 18ms

Jan 27 08:26:37 compute-1 systemd[1]: Starting Ceph OSD losetup...
Jan 27 08:26:37 compute-1 bash[72474]: /dev/loop3: [64513]:4328451 (/var/lib/ceph-osd-0.img)
Jan 27 08:26:37 compute-1 systemd[1]: Finished Ceph OSD losetup.

● chronyd.service - NTP client/server
     Loaded: loaded (/usr/lib/systemd/system/chronyd.service; enabled; preset: enabled)
     Active: active (running) since Tue 2026-01-27 08:24:18 UTC; 58min ago
       Docs: man:chronyd(8)
             man:chrony.conf(5)
   Main PID: 58510 (chronyd)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 1.0M (peak: 1.9M)
        CPU: 64ms
     CGroup: /system.slice/chronyd.service
             └─58510 /usr/sbin/chronyd -F 2

Jan 27 08:24:18 compute-1 systemd[1]: Starting NTP client/server...
Jan 27 08:24:18 compute-1 chronyd[58510]: chronyd version 4.8 starting (+CMDMON +REFCLOCK +RTC +PRIVDROP +SCFILTER +SIGND +NTS +SECHASH +IPV6 +DEBUG)
Jan 27 08:24:18 compute-1 chronyd[58510]: Frequency -26.334 +/- 0.403 ppm read from /var/lib/chrony/drift
Jan 27 08:24:18 compute-1 chronyd[58510]: Loaded seccomp filter (level 2)
Jan 27 08:24:18 compute-1 systemd[1]: Started NTP client/server.
Jan 27 08:26:27 compute-1 chronyd[58510]: Selected source 138.197.164.54 (pool.ntp.org)

● cloud-config.service - Cloud-init: Config Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2026-01-27 07:47:12 UTC; 1h 35min ago
   Main PID: 1004 (code=exited, status=0/SUCCESS)
        CPU: 382ms

Jan 27 07:47:12 np0005597077.novalocal systemd[1]: Starting Cloud-init: Config Stage...
Jan 27 07:47:12 np0005597077.novalocal cloud-init[1061]: Cloud-init v. 24.4-8.el9 running 'modules:config' at Tue, 27 Jan 2026 07:47:12 +0000. Up 11.23 seconds.
Jan 27 07:47:12 np0005597077.novalocal systemd[1]: Finished Cloud-init: Config Stage.

● cloud-final.service - Cloud-init: Final Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-final.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2026-01-27 07:47:13 UTC; 1h 35min ago
   Main PID: 1070 (code=exited, status=0/SUCCESS)
        CPU: 567ms

Jan 27 07:47:12 np0005597077.novalocal systemd[1]: Starting Cloud-init: Final Stage...
Jan 27 07:47:13 np0005597077.novalocal cloud-init[1155]: Cloud-init v. 24.4-8.el9 running 'modules:final' at Tue, 27 Jan 2026 07:47:12 +0000. Up 11.65 seconds.
Jan 27 07:47:13 np0005597077.novalocal cloud-init[1237]: #############################################################
Jan 27 07:47:13 np0005597077.novalocal cloud-init[1238]: -----BEGIN SSH HOST KEY FINGERPRINTS-----
Jan 27 07:47:13 np0005597077.novalocal cloud-init[1250]: 256 SHA256:uscyvvTGOknl+W15I8ARrCKO1rQWna9jmyqiwsIaDwM root@np0005597077.novalocal (ED25519)
Jan 27 07:47:13 np0005597077.novalocal cloud-init[1252]: 3072 SHA256:+apmuuL8uIRXQTKJBPLEisczWul48DW95ArsozrSs9U root@np0005597077.novalocal (RSA)
Jan 27 07:47:13 np0005597077.novalocal cloud-init[1253]: -----END SSH HOST KEY FINGERPRINTS-----
Jan 27 07:47:13 np0005597077.novalocal cloud-init[1255]: #############################################################
Jan 27 07:47:13 np0005597077.novalocal cloud-init[1155]: Cloud-init v. 24.4-8.el9 finished at Tue, 27 Jan 2026 07:47:13 +0000. Datasource DataSourceConfigDrive [net,ver=2][source=/dev/sr0].  Up 11.95 seconds
Jan 27 07:47:13 np0005597077.novalocal systemd[1]: Finished Cloud-init: Final Stage.

● cloud-init-local.service - Cloud-init: Local Stage (pre-network)
     Loaded: loaded (/usr/lib/systemd/system/cloud-init-local.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2026-01-27 07:47:09 UTC; 1h 35min ago
   Main PID: 777 (code=exited, status=0/SUCCESS)
        CPU: 823ms

Jan 27 07:47:07 localhost systemd[1]: Starting Cloud-init: Local Stage (pre-network)...
Jan 27 07:47:08 localhost cloud-init[841]: Cloud-init v. 24.4-8.el9 running 'init-local' at Tue, 27 Jan 2026 07:47:08 +0000. Up 7.21 seconds.
Jan 27 07:47:09 np0005597077.novalocal systemd[1]: Finished Cloud-init: Local Stage (pre-network).

● cloud-init.service - Cloud-init: Network Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2026-01-27 07:47:12 UTC; 1h 35min ago
   Main PID: 885 (code=exited, status=0/SUCCESS)
        CPU: 1.107s

Jan 27 07:47:12 np0005597077.novalocal cloud-init[923]: |          o      |
Jan 27 07:47:12 np0005597077.novalocal cloud-init[923]: |     . . . .     |
Jan 27 07:47:12 np0005597077.novalocal cloud-init[923]: |    + + o .      |
Jan 27 07:47:12 np0005597077.novalocal cloud-init[923]: |E  = + =So .     |
Jan 27 07:47:12 np0005597077.novalocal cloud-init[923]: |. o = ..+ o      |
Jan 27 07:47:12 np0005597077.novalocal cloud-init[923]: |*. . .o= . o .   |
Jan 27 07:47:12 np0005597077.novalocal cloud-init[923]: |+B.  .O+= . = o  |
Jan 27 07:47:12 np0005597077.novalocal cloud-init[923]: |B....+B@.  . o . |
Jan 27 07:47:12 np0005597077.novalocal cloud-init[923]: +----[SHA256]-----+
Jan 27 07:47:12 np0005597077.novalocal systemd[1]: Finished Cloud-init: Network Stage.

○ cpupower.service - Configure CPU power related settings
     Loaded: loaded (/usr/lib/systemd/system/cpupower.service; disabled; preset: disabled)
     Active: inactive (dead)

● crond.service - Command Scheduler
     Loaded: loaded (/usr/lib/systemd/system/crond.service; enabled; preset: enabled)
     Active: active (running) since Tue 2026-01-27 07:47:12 UTC; 1h 35min ago
   Main PID: 1011 (crond)
         IO: 196.0K read, 12.0K written
      Tasks: 1 (limit: 48560)
     Memory: 1.3M (peak: 4.7M)
        CPU: 191ms
     CGroup: /system.slice/crond.service
             └─1011 /usr/sbin/crond -n

Jan 27 08:13:01 compute-1 anacron[29959]: Job `cron.daily' terminated
Jan 27 08:33:01 compute-1 anacron[29959]: Job `cron.weekly' started
Jan 27 08:33:01 compute-1 anacron[29959]: Job `cron.weekly' terminated
Jan 27 08:53:01 compute-1 anacron[29959]: Job `cron.monthly' started
Jan 27 08:53:01 compute-1 anacron[29959]: Job `cron.monthly' terminated
Jan 27 08:53:01 compute-1 anacron[29959]: Normal exit (3 jobs run)
Jan 27 09:01:01 compute-1 CROND[227584]: (root) CMD (run-parts /etc/cron.hourly)
Jan 27 09:01:01 compute-1 run-parts[227587]: (/etc/cron.hourly) starting 0anacron
Jan 27 09:01:01 compute-1 run-parts[227593]: (/etc/cron.hourly) finished 0anacron
Jan 27 09:01:01 compute-1 CROND[227583]: (root) CMDEND (run-parts /etc/cron.hourly)

○ d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1-309390b80c059bce.service - /usr/bin/podman healthcheck run d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1
     Loaded: loaded (/run/systemd/transient/d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1-309390b80c059bce.service; transient)
  Transient: yes
     Active: inactive (dead) since Tue 2026-01-27 09:22:33 UTC; 24s ago
   Duration: 96ms
TriggeredBy: ● d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1-309390b80c059bce.timer
    Process: 236615 ExecStart=/usr/bin/podman healthcheck run d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1 (code=exited, status=0/SUCCESS)
   Main PID: 236615 (code=exited, status=0/SUCCESS)
        CPU: 100ms

Jan 27 09:22:33 compute-1 podman[236615]: 2026-01-27 09:22:33.93480754 +0000 UTC m=+0.070158927 container health_status d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '249f42cc0a5de6940e06c976a81a3e64ae1c330f940cff0a51730e3f74af51fa-443ed18f8996048d3715b9ec80abd55220542871602df33775d8611b19ccacea-443ed18f8996048d3715b9ec80abd55220542871602df33775d8611b19ccacea-443ed18f8996048d3715b9ec80abd55220542871602df33775d8611b19ccacea-0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/openstack/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', 
'/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251202, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, managed_by=edpm_ansible)

● dbus-broker.service - D-Bus System Message Bus
     Loaded: loaded (/usr/lib/systemd/system/dbus-broker.service; enabled; preset: enabled)
     Active: active (running) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
TriggeredBy: ● dbus.socket
       Docs: man:dbus-broker-launch(1)
   Main PID: 744 (dbus-broker-lau)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48560)
     Memory: 2.9M (peak: 3.7M)
Unit display-manager.service could not be found.
        CPU: 4.028s
     CGroup: /system.slice/dbus-broker.service
             ├─744 /usr/bin/dbus-broker-launch --scope system --audit
             └─773 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit

Jan 27 08:21:29 compute-1 dbus-broker-launch[744]: Noticed file-system modification, trigger reload.
Jan 27 08:21:29 compute-1 dbus-broker-launch[744]: Noticed file-system modification, trigger reload.
Jan 27 08:22:21 compute-1 dbus-broker-launch[773]: avc:  op=load_policy lsm=selinux seqno=7 res=1
Jan 27 08:22:32 compute-1 dbus-broker-launch[773]: avc:  op=load_policy lsm=selinux seqno=8 res=1
Jan 27 08:38:31 compute-1 dbus-broker-launch[773]: avc:  op=load_policy lsm=selinux seqno=9 res=1
Jan 27 08:42:45 compute-1 dbus-broker-launch[773]: avc:  op=load_policy lsm=selinux seqno=11 res=1
Jan 27 08:43:33 compute-1 dbus-broker-launch[744]: Noticed file-system modification, trigger reload.
Jan 27 08:43:33 compute-1 dbus-broker-launch[773]: avc:  op=load_policy lsm=selinux seqno=12 res=1
Jan 27 08:43:33 compute-1 dbus-broker-launch[744]: Noticed file-system modification, trigger reload.
Jan 27 08:45:05 compute-1 dbus-broker-launch[773]: avc:  op=load_policy lsm=selinux seqno=13 res=1

○ dm-event.service - Device-mapper event daemon
     Loaded: loaded (/usr/lib/systemd/system/dm-event.service; static)
     Active: inactive (dead)
TriggeredBy: ● dm-event.socket
       Docs: man:dmeventd(8)

○ dnf-makecache.service - dnf makecache
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.service; static)
     Active: inactive (dead) since Tue 2026-01-27 08:19:33 UTC; 1h 3min ago
TriggeredBy: ● dnf-makecache.timer
   Main PID: 34005 (code=exited, status=0/SUCCESS)
        CPU: 1.788s

Jan 27 08:19:32 compute-1 dnf[34005]: NFV SIG OpenvSwitch                             111 kB/s | 3.0 kB     00:00
Jan 27 08:19:32 compute-1 dnf[34005]: repo-setup-centos-appstream                     166 kB/s | 4.4 kB     00:00
Jan 27 08:19:32 compute-1 dnf[34005]: repo-setup-centos-baseos                        173 kB/s | 3.9 kB     00:00
Jan 27 08:19:33 compute-1 dnf[34005]: repo-setup-centos-highavailability              149 kB/s | 3.9 kB     00:00
Jan 27 08:19:33 compute-1 dnf[34005]: repo-setup-centos-powertools                    175 kB/s | 4.3 kB     00:00
Jan 27 08:19:33 compute-1 dnf[34005]: Extra Packages for Enterprise Linux 9 - x86_64  256 kB/s |  33 kB     00:00
Jan 27 08:19:33 compute-1 dnf[34005]: Metadata cache created.
Jan 27 08:19:33 compute-1 systemd[1]: dnf-makecache.service: Deactivated successfully.
Jan 27 08:19:33 compute-1 systemd[1]: Finished dnf makecache.
Jan 27 08:19:33 compute-1 systemd[1]: dnf-makecache.service: Consumed 1.788s CPU time.

○ dracut-cmdline.service - dracut cmdline hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-cmdline.service; static)
     Active: inactive (dead) since Tue 2026-01-27 07:47:05 UTC; 1h 35min ago
   Duration: 1.658s
       Docs: man:dracut-cmdline.service(8)
             man:dracut.bootup(7)
   Main PID: 327 (code=exited, status=0/SUCCESS)
        CPU: 125ms

Jan 27 07:47:03 localhost systemd[1]: Starting dracut cmdline hook...
Jan 27 07:47:03 localhost dracut-cmdline[327]: dracut-9 dracut-057-102.git20250818.el9
Jan 27 07:47:03 localhost dracut-cmdline[327]: Using kernel command line parameters:    BOOT_IMAGE=(hd0,msdos1)/boot/vmlinuz-5.14.0-661.el9.x86_64 root=UUID=22ac9141-3960-4912-b20e-19fc8a328d40 ro console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=1G-2G:192M,2G-64G:256M,64G-:512M
Jan 27 07:47:03 localhost systemd[1]: Finished dracut cmdline hook.
Jan 27 07:47:05 localhost systemd[1]: dracut-cmdline.service: Deactivated successfully.
Jan 27 07:47:05 localhost systemd[1]: Stopped dracut cmdline hook.

○ dracut-initqueue.service - dracut initqueue hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-initqueue.service; static)
     Active: inactive (dead) since Tue 2026-01-27 07:47:05 UTC; 1h 35min ago
   Duration: 794ms
       Docs: man:dracut-initqueue.service(8)
             man:dracut.bootup(7)
   Main PID: 501 (code=exited, status=0/SUCCESS)
        CPU: 32ms

Jan 27 07:47:03 localhost systemd[1]: Starting dracut initqueue hook...
Jan 27 07:47:04 localhost systemd[1]: Finished dracut initqueue hook.
Jan 27 07:47:05 localhost systemd[1]: dracut-initqueue.service: Deactivated successfully.
Jan 27 07:47:05 localhost systemd[1]: Stopped dracut initqueue hook.

○ dracut-mount.service - dracut mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-mount.service; static)
     Active: inactive (dead) since Tue 2026-01-27 07:47:05 UTC; 1h 35min ago
   Duration: 145ms
       Docs: man:dracut-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 572 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Jan 27 07:47:04 localhost systemd[1]: Starting dracut mount hook...
Jan 27 07:47:04 localhost systemd[1]: Finished dracut mount hook.
Jan 27 07:47:05 localhost systemd[1]: dracut-mount.service: Deactivated successfully.
Jan 27 07:47:05 localhost systemd[1]: Stopped dracut mount hook.

○ dracut-pre-mount.service - dracut pre-mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-mount.service; static)
     Active: inactive (dead) since Tue 2026-01-27 07:47:05 UTC; 1h 35min ago
   Duration: 752ms
       Docs: man:dracut-pre-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 549 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Jan 27 07:47:04 localhost systemd[1]: Starting dracut pre-mount hook...
Jan 27 07:47:04 localhost systemd[1]: Finished dracut pre-mount hook.
Jan 27 07:47:05 localhost systemd[1]: dracut-pre-mount.service: Deactivated successfully.
Jan 27 07:47:05 localhost systemd[1]: Stopped dracut pre-mount hook.

○ dracut-pre-pivot.service - dracut pre-pivot and cleanup hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-pivot.service; static)
     Active: inactive (dead) since Tue 2026-01-27 07:47:05 UTC; 1h 35min ago
   Duration: 35ms
       Docs: man:dracut-pre-pivot.service(8)
             man:dracut.bootup(7)
   Main PID: 577 (code=exited, status=0/SUCCESS)
        CPU: 91ms

Jan 27 07:47:04 localhost systemd[1]: Starting dracut pre-pivot and cleanup hook...
Jan 27 07:47:04 localhost systemd[1]: Finished dracut pre-pivot and cleanup hook.
Jan 27 07:47:05 localhost systemd[1]: dracut-pre-pivot.service: Deactivated successfully.
Jan 27 07:47:05 localhost systemd[1]: Stopped dracut pre-pivot and cleanup hook.

○ dracut-pre-trigger.service - dracut pre-trigger hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-trigger.service; static)
     Active: inactive (dead) since Tue 2026-01-27 07:47:05 UTC; 1h 35min ago
   Duration: 1.345s
       Docs: man:dracut-pre-trigger.service(8)
             man:dracut.bootup(7)
   Main PID: 466 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Jan 27 07:47:03 localhost systemd[1]: Starting dracut pre-trigger hook...
Jan 27 07:47:03 localhost systemd[1]: Finished dracut pre-trigger hook.
Jan 27 07:47:05 localhost systemd[1]: dracut-pre-trigger.service: Deactivated successfully.
Jan 27 07:47:05 localhost systemd[1]: Stopped dracut pre-trigger hook.

○ dracut-pre-udev.service - dracut pre-udev hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-udev.service; static)
     Active: inactive (dead) since Tue 2026-01-27 07:47:05 UTC; 1h 35min ago
   Duration: 1.421s
       Docs: man:dracut-pre-udev.service(8)
             man:dracut.bootup(7)
   Main PID: 416 (code=exited, status=0/SUCCESS)
        CPU: 226ms

Jan 27 07:47:03 localhost systemd[1]: Starting dracut pre-udev hook...
Jan 27 07:47:03 localhost rpc.statd[443]: Version 2.5.4 starting
Jan 27 07:47:03 localhost rpc.statd[443]: Initializing NSM state
Jan 27 07:47:03 localhost rpc.idmapd[448]: Setting log level to 0
Jan 27 07:47:03 localhost systemd[1]: Finished dracut pre-udev hook.
Jan 27 07:47:04 localhost rpc.idmapd[448]: exiting on signal 15
Jan 27 07:47:05 localhost systemd[1]: dracut-pre-udev.service: Deactivated successfully.
Jan 27 07:47:05 localhost systemd[1]: Stopped dracut pre-udev hook.

○ dracut-shutdown-onfailure.service - Service executing upon dracut-shutdown failure to perform cleanup
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown-onfailure.service; static)
     Active: inactive (dead)
       Docs: man:dracut-shutdown.service(8)

● dracut-shutdown.service - Restore /run/initramfs on shutdown
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown.service; static)
     Active: active (exited) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
       Docs: man:dracut-shutdown.service(8)
   Main PID: 778 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Jan 27 07:47:07 localhost systemd[1]: Starting Restore /run/initramfs on shutdown...
Jan 27 07:47:07 localhost systemd[1]: Finished Restore /run/initramfs on shutdown.

● edpm-container-shutdown.service - EDPM Container Shutdown
     Loaded: loaded (/etc/systemd/system/edpm-container-shutdown.service; enabled; preset: enabled)
     Active: active (exited) since Tue 2026-01-27 08:24:47 UTC; 58min ago
       Docs: https://github.com/openstack-k8s-operators/docs
   Main PID: 61504 (code=exited, status=0/SUCCESS)
        CPU: 4ms

Jan 27 08:24:47 compute-1 systemd[1]: Starting EDPM Container Shutdown...
Jan 27 08:24:47 compute-1 systemd[1]: Finished EDPM Container Shutdown.

○ edpm_libvirt_guests.service - Suspend libvirt Guests in edpm
     Loaded: loaded (/etc/systemd/system/edpm_libvirt_guests.service; enabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:libvirtd(8)
             https://libvirt.org

● edpm_nova_compute.service - nova_compute container
     Loaded: loaded (/etc/systemd/system/edpm_nova_compute.service; enabled; preset: disabled)
     Active: active (running) since Tue 2026-01-27 08:48:54 UTC; 34min ago
    Process: 221450 ExecStart=/usr/bin/podman start nova_compute (code=exited, status=0/SUCCESS)
   Main PID: 221466 (conmon)
         IO: 0B read, 91.0K written
      Tasks: 1 (limit: 48560)
     Memory: 680.0K (peak: 19.7M)
        CPU: 336ms
     CGroup: /system.slice/edpm_nova_compute.service
             └─221466 /usr/bin/conmon --api-version 1 -c a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a -u a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a/userdata -p /run/containers/storage/overlay-containers/a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a

Jan 27 09:22:29 compute-1 nova_compute[221466]: 2026-01-27 09:22:29.287 221470 DEBUG nova.compute.resource_tracker [None req-b051c06b-b04d-4ebb-a974-55605b45294f - - - - - -] Final resource view: name=compute-1.ctlplane.example.com phys_ram=7679MB used_ram=512MB phys_disk=20GB used_disk=0GB total_vcpus=8 used_vcpus=0 pci_stats=[] _report_final_resource_view /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:1066[00m
Jan 27 09:22:29 compute-1 nova_compute[221466]: 2026-01-27 09:22:29.359 221470 DEBUG oslo_concurrency.processutils [None req-b051c06b-b04d-4ebb-a974-55605b45294f - - - - - -] Running cmd (subprocess): ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:384[00m
Jan 27 09:22:29 compute-1 nova_compute[221466]: 2026-01-27 09:22:29.874 221470 DEBUG oslo_concurrency.processutils [None req-b051c06b-b04d-4ebb-a974-55605b45294f - - - - - -] CMD "ceph df --format=json --id openstack --conf /etc/ceph/ceph.conf" returned: 0 in 0.515s execute /usr/lib/python3.9/site-packages/oslo_concurrency/processutils.py:422[00m
Jan 27 09:22:29 compute-1 nova_compute[221466]: 2026-01-27 09:22:29.878 221470 DEBUG nova.compute.provider_tree [None req-b051c06b-b04d-4ebb-a974-55605b45294f - - - - - -] Inventory has not changed in ProviderTree for provider: 364728e3-e139-45c8-a3e1-efd0dc8a9c80 update_inventory /usr/lib/python3.9/site-packages/nova/compute/provider_tree.py:180[00m
Jan 27 09:22:29 compute-1 nova_compute[221466]: 2026-01-27 09:22:29.905 221470 DEBUG nova.scheduler.client.report [None req-b051c06b-b04d-4ebb-a974-55605b45294f - - - - - -] Inventory has not changed for provider 364728e3-e139-45c8-a3e1-efd0dc8a9c80 based on inventory data: {'VCPU': {'total': 8, 'reserved': 0, 'min_unit': 1, 'max_unit': 8, 'step_size': 1, 'allocation_ratio': 4.0}, 'MEMORY_MB': {'total': 7679, 'reserved': 512, 'min_unit': 1, 'max_unit': 7679, 'step_size': 1, 'allocation_ratio': 1.0}, 'DISK_GB': {'total': 20, 'reserved': 0, 'min_unit': 1, 'max_unit': 20, 'step_size': 1, 'allocation_ratio': 0.9}} set_inventory_for_provider /usr/lib/python3.9/site-packages/nova/scheduler/client/report.py:940[00m
Jan 27 09:22:29 compute-1 nova_compute[221466]: 2026-01-27 09:22:29.907 221470 DEBUG nova.compute.resource_tracker [None req-b051c06b-b04d-4ebb-a974-55605b45294f - - - - - -] Compute_service record updated for compute-1.ctlplane.example.com:compute-1.ctlplane.example.com _update_available_resource /usr/lib/python3.9/site-packages/nova/compute/resource_tracker.py:995[00m
Jan 27 09:22:29 compute-1 nova_compute[221466]: 2026-01-27 09:22:29.907 221470 DEBUG oslo_concurrency.lockutils [None req-b051c06b-b04d-4ebb-a974-55605b45294f - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.759s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Jan 27 09:22:30 compute-1 nova_compute[221466]: 2026-01-27 09:22:30.902 221470 DEBUG oslo_service.periodic_task [None req-b051c06b-b04d-4ebb-a974-55605b45294f - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Jan 27 09:22:32 compute-1 nova_compute[221466]: 2026-01-27 09:22:32.386 221470 DEBUG oslo_service.periodic_task [None req-b051c06b-b04d-4ebb-a974-55605b45294f - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Jan 27 09:22:39 compute-1 nova_compute[221466]: 2026-01-27 09:22:39.387 221470 DEBUG oslo_service.periodic_task [None req-b051c06b-b04d-4ebb-a974-55605b45294f - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m

● edpm_ovn_controller.service - ovn_controller container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_controller.service; enabled; preset: disabled)
     Active: active (running) since Tue 2026-01-27 08:39:38 UTC; 43min ago
   Main PID: 130301 (conmon)
         IO: 0B read, 130.5K written
      Tasks: 1 (limit: 48560)
     Memory: 692.0K (peak: 19.4M)
        CPU: 224ms
     CGroup: /system.slice/edpm_ovn_controller.service
             └─130301 /usr/bin/conmon --api-version 1 -c f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d -u f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d/userdata -p /run/containers/storage/overlay-containers/f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d

Jan 27 08:39:38 compute-1 ovn_controller[130301]: 2026-01-27T08:39:38Z|00023|features|INFO|OVS DB schema supports 4 flow table prefixes, our IDL supports: 4
Jan 27 08:39:38 compute-1 ovn_controller[130301]: 2026-01-27T08:39:38Z|00024|main|INFO|Setting flow table prefixes: ip_src, ip_dst, ipv6_src, ipv6_dst.
Jan 27 08:39:38 compute-1 ovn_controller[130301]: 2026-01-27T08:39:38Z|00001|pinctrl(ovn_pinctrl0)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connecting to switch
Jan 27 08:39:38 compute-1 ovn_controller[130301]: 2026-01-27T08:39:38Z|00002|rconn(ovn_pinctrl0)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connecting...
Jan 27 08:39:38 compute-1 ovn_controller[130301]: 2026-01-27T08:39:38Z|00001|statctrl(ovn_statctrl3)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connecting to switch
Jan 27 08:39:38 compute-1 ovn_controller[130301]: 2026-01-27T08:39:38Z|00002|rconn(ovn_statctrl3)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connecting...
Jan 27 08:39:38 compute-1 ovn_controller[130301]: 2026-01-27T08:39:38Z|00003|rconn(ovn_pinctrl0)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connected
Jan 27 08:39:38 compute-1 ovn_controller[130301]: 2026-01-27T08:39:38Z|00003|rconn(ovn_statctrl3)|INFO|unix:/var/run/openvswitch/br-int.mgmt: connected
Jan 27 08:40:08 compute-1 ovn_controller[130301]: 2026-01-27T08:40:08Z|00025|memory|INFO|16256 kB peak resident set size after 30.0 seconds
Jan 27 08:40:08 compute-1 ovn_controller[130301]: 2026-01-27T08:40:08Z|00026|memory|INFO|idl-cells-OVN_Southbound:273 idl-cells-Open_vSwitch:642 ofctrl_desired_flow_usage-KB:7 ofctrl_installed_flow_usage-KB:5 ofctrl_sb_flow_ref_usage-KB:3

● edpm_ovn_metadata_agent.service - ovn_metadata_agent container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_metadata_agent.service; enabled; preset: disabled)
     Active: active (running) since Tue 2026-01-27 08:40:52 UTC; 42min ago
   Main PID: 139973 (conmon)
         IO: 0B read, 127.0K written
      Tasks: 1 (limit: 48560)
     Memory: 716.0K (peak: 18.5M)
        CPU: 269ms
     CGroup: /system.slice/edpm_ovn_metadata_agent.service
             └─139973 /usr/bin/conmon --api-version 1 -c d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1 -u d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1/userdata -p /run/containers/storage/overlay-containers/d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1

Jan 27 09:19:54 compute-1 ovn_metadata_agent[139973]: 2026-01-27 09:19:54.646 139978 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Jan 27 09:20:54 compute-1 ovn_metadata_agent[139973]: 2026-01-27 09:20:54.646 139978 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Jan 27 09:20:54 compute-1 ovn_metadata_agent[139973]: 2026-01-27 09:20:54.646 139978 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Jan 27 09:20:54 compute-1 ovn_metadata_agent[139973]: 2026-01-27 09:20:54.646 139978 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Jan 27 09:21:54 compute-1 ovn_metadata_agent[139973]: 2026-01-27 09:21:54.647 139978 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Jan 27 09:21:54 compute-1 ovn_metadata_agent[139973]: 2026-01-27 09:21:54.648 139978 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Jan 27 09:21:54 compute-1 ovn_metadata_agent[139973]: 2026-01-27 09:21:54.648 139978 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423
Jan 27 09:22:54 compute-1 ovn_metadata_agent[139973]: 2026-01-27 09:22:54.648 139978 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404
Jan 27 09:22:54 compute-1 ovn_metadata_agent[139973]: 2026-01-27 09:22:54.650 139978 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409
Jan 27 09:22:54 compute-1 ovn_metadata_agent[139973]: 2026-01-27 09:22:54.650 139978 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423

○ emergency.service - Emergency Shell
     Loaded: loaded (/usr/lib/systemd/system/emergency.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

○ f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d-183c56ee99fd276c.service - /usr/bin/podman healthcheck run f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d
     Loaded: loaded (/run/systemd/transient/f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d-183c56ee99fd276c.service; transient)
  Transient: yes
     Active: inactive (dead) since Tue 2026-01-27 09:22:55 UTC; 2s ago
   Duration: 117ms
TriggeredBy: ● f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d-183c56ee99fd276c.timer
    Process: 240341 ExecStart=/usr/bin/podman healthcheck run f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d (code=exited, status=0/SUCCESS)
   Main PID: 240341 (code=exited, status=0/SUCCESS)
        CPU: 84ms

Jan 27 09:22:55 compute-1 podman[240341]: 2026-01-27 09:22:55.952412269 +0000 UTC m=+0.090881580 container health_status f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '249f42cc0a5de6940e06c976a81a3e64ae1c330f940cff0a51730e3f74af51fa-443ed18f8996048d3715b9ec80abd55220542871602df33775d8611b19ccacea-443ed18f8996048d3715b9ec80abd55220542871602df33775d8611b19ccacea-443ed18f8996048d3715b9ec80abd55220542871602df33775d8611b19ccacea'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, container_name=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, managed_by=edpm_ansible, org.label-schema.build-date=20251202, org.label-schema.schema-version=1.0, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, tcib_managed=true)
Unit fcoe.service could not be found.
Unit hv_kvp_daemon.service could not be found.

● getty@tty1.service - Getty on tty1
     Loaded: loaded (/usr/lib/systemd/system/getty@.service; enabled; preset: enabled)
     Active: active (running) since Tue 2026-01-27 07:47:12 UTC; 1h 35min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1012 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 276.0K (peak: 500.0K)
        CPU: 6ms
     CGroup: /system.slice/system-getty.slice/getty@tty1.service
             └─1012 /sbin/agetty -o "-p -- \\u" --noclear - linux

Jan 27 07:47:12 np0005597077.novalocal systemd[1]: Started Getty on tty1.

● gssproxy.service - GSSAPI Proxy Daemon
     Loaded: loaded (/usr/lib/systemd/system/gssproxy.service; disabled; preset: disabled)
     Active: active (running) since Tue 2026-01-27 07:47:09 UTC; 1h 35min ago
   Main PID: 872 (gssproxy)
         IO: 0B read, 0B written
      Tasks: 6 (limit: 48560)
     Memory: 1.8M (peak: 3.4M)
        CPU: 25ms
     CGroup: /system.slice/gssproxy.service
             └─872 /usr/sbin/gssproxy -D

Jan 27 07:47:09 np0005597077.novalocal systemd[1]: Starting GSSAPI Proxy Daemon...
Jan 27 07:47:09 np0005597077.novalocal systemd[1]: Started GSSAPI Proxy Daemon.

○ import-state.service - Import network configuration from initramfs
     Loaded: loaded (/usr/lib/systemd/system/import-state.service; enabled; preset: enabled)
     Active: inactive (dead)

○ initrd-cleanup.service - Cleaning Up and Shutting Down Daemons
     Loaded: loaded (/usr/lib/systemd/system/initrd-cleanup.service; static)
     Active: inactive (dead) since Tue 2026-01-27 07:47:05 UTC; 1h 35min ago
   Main PID: 619 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Jan 27 07:47:04 localhost systemd[1]: Starting Cleaning Up and Shutting Down Daemons...
Jan 27 07:47:05 localhost systemd[1]: initrd-cleanup.service: Deactivated successfully.
Jan 27 07:47:05 localhost systemd[1]: Finished Cleaning Up and Shutting Down Daemons.

○ initrd-parse-etc.service - Mountpoints Configured in the Real Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-parse-etc.service; static)
     Active: inactive (dead) since Tue 2026-01-27 07:47:04 UTC; 1h 35min ago
   Main PID: 571 (code=exited, status=0/SUCCESS)
        CPU: 16ms

Jan 27 07:47:04 localhost systemd[1]: Starting Mountpoints Configured in the Real Root...
Jan 27 07:47:04 localhost systemd[1]: initrd-parse-etc.service: Deactivated successfully.
Jan 27 07:47:04 localhost systemd[1]: Finished Mountpoints Configured in the Real Root.

○ initrd-switch-root.service - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.service; static)
     Active: inactive (dead) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
   Main PID: 624 (code=exited, status=0/SUCCESS)
        CPU: 6ms

Jan 27 07:47:05 localhost systemd[1]: Starting Switch Root...

○ initrd-udevadm-cleanup-db.service - Cleanup udev Database
     Loaded: loaded (/usr/lib/systemd/system/initrd-udevadm-cleanup-db.service; static)
     Active: inactive (dead) since Tue 2026-01-27 07:47:05 UTC; 1h 35min ago
   Main PID: 622 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Jan 27 07:47:05 localhost systemd[1]: Starting Cleanup udev Database...
Jan 27 07:47:05 localhost systemd[1]: initrd-udevadm-cleanup-db.service: Deactivated successfully.
Jan 27 07:47:05 localhost systemd[1]: Finished Cleanup udev Database.

○ ip6tables.service - IPv6 firewall with ip6tables
     Loaded: loaded (/usr/lib/systemd/system/ip6tables.service; disabled; preset: disabled)
     Active: inactive (dead)

○ iptables.service - IPv4 firewall with iptables
     Loaded: loaded (/usr/lib/systemd/system/iptables.service; disabled; preset: disabled)
     Active: inactive (dead) since Tue 2026-01-27 08:25:00 UTC; 57min ago
   Duration: 37min 52.804s
   Main PID: 779 (code=exited, status=0/SUCCESS)
        CPU: 122ms

Jan 27 07:47:07 localhost systemd[1]: Starting IPv4 firewall with iptables...
Jan 27 07:47:07 localhost iptables.init[779]: iptables: Applying firewall rules: [  OK  ]
Jan 27 07:47:07 localhost systemd[1]: Finished IPv4 firewall with iptables.
Jan 27 08:25:00 compute-1 systemd[1]: Stopping IPv4 firewall with iptables...
Jan 27 08:25:00 compute-1 iptables.init[62753]: iptables: Setting chains to policy ACCEPT: raw mangle filter nat [  OK  ]
Jan 27 08:25:00 compute-1 iptables.init[62753]: iptables: Flushing firewall rules: [  OK  ]
Jan 27 08:25:00 compute-1 systemd[1]: iptables.service: Deactivated successfully.
Jan 27 08:25:00 compute-1 systemd[1]: Stopped IPv4 firewall with iptables.

● irqbalance.service - irqbalance daemon
     Loaded: loaded (/usr/lib/systemd/system/irqbalance.service; enabled; preset: enabled)
     Active: active (running) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
       Docs: man:irqbalance(1)
             https://github.com/Irqbalance/irqbalance
   Main PID: 788 (irqbalance)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48560)
     Memory: 1.1M (peak: 1.3M)
        CPU: 494ms
     CGroup: /system.slice/irqbalance.service
             └─788 /usr/sbin/irqbalance

Jan 27 07:47:18 np0005597077.novalocal irqbalance[788]: Cannot change IRQ 28 affinity: Operation not permitted
Jan 27 07:47:18 np0005597077.novalocal irqbalance[788]: IRQ 28 affinity is now unmanaged
Jan 27 07:47:18 np0005597077.novalocal irqbalance[788]: Cannot change IRQ 34 affinity: Operation not permitted
Jan 27 07:47:18 np0005597077.novalocal irqbalance[788]: IRQ 34 affinity is now unmanaged
Jan 27 07:47:18 np0005597077.novalocal irqbalance[788]: Cannot change IRQ 32 affinity: Operation not permitted
Jan 27 07:47:18 np0005597077.novalocal irqbalance[788]: IRQ 32 affinity is now unmanaged
Jan 27 07:47:18 np0005597077.novalocal irqbalance[788]: Cannot change IRQ 30 affinity: Operation not permitted
Jan 27 07:47:18 np0005597077.novalocal irqbalance[788]: IRQ 30 affinity is now unmanaged
Jan 27 07:47:18 np0005597077.novalocal irqbalance[788]: Cannot change IRQ 29 affinity: Operation not permitted
Jan 27 07:47:18 np0005597077.novalocal irqbalance[788]: IRQ 29 affinity is now unmanaged

○ iscsi-init.service - One time configuration for iscsi.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-init.service; disabled; preset: disabled)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2026-01-27 08:47:06 UTC; 35min ago

Jan 27 08:46:24 compute-1 systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).
Jan 27 08:47:06 compute-1 systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).

○ iscsi-onboot.service - Special handling of early boot iSCSI sessions
     Loaded: loaded (/usr/lib/systemd/system/iscsi-onboot.service; enabled; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsi-shutdown.service - Logout off all iSCSI sessions on shutdown
     Loaded: loaded (/usr/lib/systemd/system/iscsi-shutdown.service; static)
     Active: active (exited) since Tue 2026-01-27 08:46:24 UTC; 36min ago
       Docs: man:iscsid(8)
             man:iscsiadm(8)
   Main PID: 200236 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Jan 27 08:46:24 compute-1 systemd[1]: Starting Logout off all iSCSI sessions on shutdown...
Jan 27 08:46:24 compute-1 systemd[1]: Finished Logout off all iSCSI sessions on shutdown.
Unit lvm2-activation-early.service could not be found.

○ iscsi-starter.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-starter.service; enabled; preset: disabled)
     Active: inactive (dead)

○ iscsi.service - Login and scanning of iSCSI devices
     Loaded: loaded (/usr/lib/systemd/system/iscsi.service; indirect; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsid.service - Open-iSCSI
     Loaded: loaded (/usr/lib/systemd/system/iscsid.service; enabled; preset: disabled)
     Active: active (running) since Tue 2026-01-27 08:47:06 UTC; 35min ago
TriggeredBy: ● iscsid.socket
       Docs: man:iscsid(8)
             man:iscsiuio(8)
             man:iscsiadm(8)
   Main PID: 206933 (iscsid)
     Status: "Ready to process requests"
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 1.9M (peak: 2.0M)
        CPU: 18ms
     CGroup: /system.slice/iscsid.service
             └─206933 /usr/sbin/iscsid -f

Jan 27 08:47:06 compute-1 systemd[1]: Starting Open-iSCSI...
Jan 27 08:47:06 compute-1 systemd[1]: Started Open-iSCSI.

○ iscsiuio.service - iSCSI UserSpace I/O driver
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ iscsiuio.socket
       Docs: man:iscsiuio(8)

● kdump.service - Crash recovery kernel arming
     Loaded: loaded (/usr/lib/systemd/system/kdump.service; enabled; preset: enabled)
     Active: active (exited) since Tue 2026-01-27 07:47:28 UTC; 1h 35min ago
   Main PID: 1010 (code=exited, status=0/SUCCESS)
        CPU: 18.566s

Jan 27 07:47:27 np0005597077.novalocal dracut[1304]: Linked:         0 files
Jan 27 07:47:27 np0005597077.novalocal dracut[1304]: Compared:       0 xattrs
Jan 27 07:47:27 np0005597077.novalocal dracut[1304]: Compared:       0 files
Jan 27 07:47:27 np0005597077.novalocal dracut[1304]: Saved:          0 B
Jan 27 07:47:27 np0005597077.novalocal dracut[1304]: Duration:       0.000570 seconds
Jan 27 07:47:27 np0005597077.novalocal dracut[1304]: *** Hardlinking files done ***
Jan 27 07:47:27 np0005597077.novalocal dracut[1304]: *** Creating initramfs image file '/boot/initramfs-5.14.0-661.el9.x86_64kdump.img' done ***
Jan 27 07:47:28 np0005597077.novalocal kdumpctl[1020]: kdump: kexec: loaded kdump kernel
Jan 27 07:47:28 np0005597077.novalocal kdumpctl[1020]: kdump: Starting kdump: [OK]
Jan 27 07:47:28 np0005597077.novalocal systemd[1]: Finished Crash recovery kernel arming.

● kmod-static-nodes.service - Create List of Static Device Nodes
     Loaded: loaded (/usr/lib/systemd/system/kmod-static-nodes.service; static)
     Active: active (exited) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
   Main PID: 674 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Jan 27 07:47:06 localhost systemd[1]: Finished Create List of Static Device Nodes.

● ldconfig.service - Rebuild Dynamic Linker Cache
     Loaded: loaded (/usr/lib/systemd/system/ldconfig.service; static)
     Active: active (exited) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
       Docs: man:ldconfig(8)
   Main PID: 696 (code=exited, status=0/SUCCESS)
        CPU: 57ms

Jan 27 07:47:06 localhost systemd[1]: Starting Rebuild Dynamic Linker Cache...
Jan 27 07:47:07 localhost systemd[1]: Finished Rebuild Dynamic Linker Cache.

○ libvirtd.service
     Loaded: masked (Reason: Unit libvirtd.service is masked.)
     Active: inactive (dead)
TriggeredBy: ○ libvirtd-ro.socket
             ○ libvirtd.socket
             ○ libvirtd-admin.socket

○ loadmodules.service - Load legacy module configuration
     Loaded: loaded (/usr/lib/systemd/system/loadmodules.service; enabled; preset: enabled)
     Active: inactive (dead)

○ logrotate.service - Rotate log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.service; static)
     Active: inactive (dead)
TriggeredBy: ● logrotate.timer
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

○ lvm2-lvmpolld.service - LVM2 poll daemon
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.service; static)
     Active: inactive (dead)
TriggeredBy: ● lvm2-lvmpolld.socket
       Docs: man:lvmpolld(8)

● lvm2-monitor.service - Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling
     Loaded: loaded (/usr/lib/systemd/system/lvm2-monitor.service; enabled; preset: enabled)
     Active: active (exited) since Tue 2026-01-27 08:19:30 UTC; 1h 3min ago
       Docs: man:dmeventd(8)
             man:lvcreate(8)
             man:lvchange(8)
             man:vgchange(8)
   Main PID: 34006 (code=exited, status=0/SUCCESS)
        CPU: 22ms

Jan 27 08:19:30 compute-1 systemd[1]: Starting Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling...
Jan 27 08:19:30 compute-1 systemd[1]: Finished Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling.

○ mdmonitor.service - Software RAID monitoring and management
     Loaded: loaded (/usr/lib/systemd/system/mdmonitor.service; enabled; preset: enabled)
     Active: inactive (dead)

○ microcode.service - Load CPU microcode update
     Loaded: loaded (/usr/lib/systemd/system/microcode.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2026-01-27 07:47:07 UTC; 1h 35min ago

Jan 27 07:47:07 localhost systemd[1]: Load CPU microcode update was skipped because of an unmet condition check (ConditionPathExists=/sys/devices/system/cpu/microcode/reload).

○ modprobe@configfs.service - Load Kernel Module configfs
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
       Docs: man:modprobe(8)
   Main PID: 760 (code=exited, status=0/SUCCESS)
        CPU: 6ms

Jan 27 07:47:07 localhost systemd[1]: Starting Load Kernel Module configfs...
Jan 27 07:47:07 localhost systemd[1]: modprobe@configfs.service: Deactivated successfully.
Jan 27 07:47:07 localhost systemd[1]: Finished Load Kernel Module configfs.

○ modprobe@drm.service - Load Kernel Module drm
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:modprobe(8)
   Main PID: 676 (code=exited, status=0/SUCCESS)
        CPU: 94ms

Jan 27 07:47:06 localhost systemd[1]: modprobe@drm.service: Deactivated successfully.
Jan 27 07:47:06 localhost systemd[1]: Finished Load Kernel Module drm.

○ modprobe@efi_pstore.service - Load Kernel Module efi_pstore
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:modprobe(8)
   Main PID: 677 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Jan 27 07:47:06 localhost systemd[1]: modprobe@efi_pstore.service: Deactivated successfully.
Jan 27 07:47:06 localhost systemd[1]: Finished Load Kernel Module efi_pstore.

○ modprobe@fuse.service - Load Kernel Module fuse
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:modprobe(8)
   Main PID: 678 (code=exited, status=0/SUCCESS)
        CPU: 22ms

Jan 27 07:47:06 localhost systemd[1]: modprobe@fuse.service: Deactivated successfully.
Jan 27 07:47:06 localhost systemd[1]: Finished Load Kernel Module fuse.

● multipathd.service - Device-Mapper Multipath Device Controller
     Loaded: loaded (/usr/lib/systemd/system/multipathd.service; enabled; preset: enabled)
     Active: active (running) since Tue 2026-01-27 08:47:11 UTC; 35min ago
TriggeredBy: ● multipathd.socket
   Main PID: 207091 (multipathd)
     Status: "up"
         IO: 0B read, 0B written
      Tasks: 7
     Memory: 18.4M (peak: 18.7M)
        CPU: 269ms
     CGroup: /system.slice/multipathd.service
             └─207091 /sbin/multipathd -d -s

Jan 27 08:47:11 compute-1 systemd[1]: Starting Device-Mapper Multipath Device Controller...
Jan 27 08:47:11 compute-1 multipathd[207091]: --------start up--------
Jan 27 08:47:11 compute-1 multipathd[207091]: read /etc/multipath.conf
Jan 27 08:47:11 compute-1 multipathd[207091]: path checkers start up
Jan 27 08:47:11 compute-1 systemd[1]: Started Device-Mapper Multipath Device Controller.

○ netns-placeholder.service - Create netns directory
     Loaded: loaded (/etc/systemd/system/netns-placeholder.service; enabled; preset: enabled)
     Active: inactive (dead) since Tue 2026-01-27 08:40:23 UTC; 42min ago
   Main PID: 137111 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Jan 27 08:40:23 compute-1 systemd[1]: Starting Create netns directory...
Jan 27 08:40:23 compute-1 systemd[1]: netns-placeholder.service: Deactivated successfully.
Jan 27 08:40:23 compute-1 systemd[1]: Finished Create netns directory.

○ network.service - LSB: Bring up/down networking
     Loaded: loaded (/etc/rc.d/init.d/network; generated)
     Active: inactive (dead)
       Docs: man:systemd-sysv-generator(8)

● NetworkManager-wait-online.service - Network Manager Wait Online
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager-wait-online.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2026-01-27 08:22:41 UTC; 1h 0min ago
       Docs: man:NetworkManager-wait-online.service(8)
   Main PID: 48964 (code=exited, status=0/SUCCESS)
        CPU: 18ms

Jan 27 08:22:41 compute-1 systemd[1]: Starting Network Manager Wait Online...
Jan 27 08:22:41 compute-1 systemd[1]: Finished Network Manager Wait Online.

● NetworkManager.service - Network Manager
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager.service; enabled; preset: enabled)
    Drop-In: /usr/lib/systemd/system/NetworkManager.service.d
             └─NetworkManager-ovs.conf
     Active: active (running) since Tue 2026-01-27 08:22:41 UTC; 1h 0min ago
       Docs: man:NetworkManager(8)
   Main PID: 48943 (NetworkManager)
         IO: 104.0K read, 341.0K written
      Tasks: 3 (limit: 48560)
     Memory: 5.4M (peak: 6.4M)
        CPU: 31.676s
     CGroup: /system.slice/NetworkManager.service
             └─48943 /usr/sbin/NetworkManager --no-daemon

Jan 27 08:39:38 compute-1 NetworkManager[48943]: <info>  [1769503178.5632] device (br-int)[Open vSwitch Interface]: state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external')
Jan 27 08:39:38 compute-1 NetworkManager[48943]: <warn>  [1769503178.5634] device (br-int)[Open vSwitch Interface]: error setting IPv4 forwarding to '1': No such file or directory
Jan 27 08:39:38 compute-1 NetworkManager[48943]: <info>  [1769503178.5639] manager: (br-int): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/17)
Jan 27 08:39:38 compute-1 NetworkManager[48943]: <info>  [1769503178.5644] manager: (br-int): new Open vSwitch Bridge device (/org/freedesktop/NetworkManager/Devices/18)
Jan 27 08:39:38 compute-1 NetworkManager[48943]: <info>  [1769503178.5647] device (br-int)[Open vSwitch Interface]: state change: unavailable -> disconnected (reason 'none', managed-type: 'full')
Jan 27 08:39:38 compute-1 NetworkManager[48943]: <info>  [1769503178.5979] manager: (ovn-d032c5-0): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/19)
Jan 27 08:39:38 compute-1 NetworkManager[48943]: <info>  [1769503178.5986] manager: (ovn-a901be-0): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/20)
Jan 27 08:39:38 compute-1 NetworkManager[48943]: <info>  [1769503178.6191] device (genev_sys_6081): carrier: link connected
Jan 27 08:39:38 compute-1 NetworkManager[48943]: <info>  [1769503178.6195] manager: (genev_sys_6081): new Generic device (/org/freedesktop/NetworkManager/Devices/21)
Jan 27 08:39:38 compute-1 NetworkManager[48943]: <info>  [1769503178.6864] manager: (ovn-fd4963-0): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/22)

○ nfs-idmapd.service - NFSv4 ID-name mapping service
     Loaded: loaded (/usr/lib/systemd/system/nfs-idmapd.service; static)
     Active: inactive (dead)
       Docs: man:idmapd(8)

○ nfs-mountd.service - NFS Mount Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfs-mountd.service; static)
Unit ntpd.service could not be found.
Unit ntpdate.service could not be found.
     Active: inactive (dead)
       Docs: man:rpc.mountd(8)

○ nfs-server.service - NFS server and services
     Loaded: loaded (/usr/lib/systemd/system/nfs-server.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:rpc.nfsd(8)
             man:exportfs(8)

○ nfs-utils.service - NFS server and client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-utils.service; static)
     Active: inactive (dead)

○ nfsdcld.service - NFSv4 Client Tracking Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfsdcld.service; static)
     Active: inactive (dead)
       Docs: man:nfsdcld(8)

● nftables.service - Netfilter Tables
     Loaded: loaded (/usr/lib/systemd/system/nftables.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2026-01-27 08:25:03 UTC; 57min ago
       Docs: man:nft(8)
   Main PID: 63143 (code=exited, status=0/SUCCESS)
        CPU: 26ms

Jan 27 08:25:03 compute-1 systemd[1]: Starting Netfilter Tables...
Jan 27 08:25:03 compute-1 systemd[1]: Finished Netfilter Tables.

● nis-domainname.service - Read and set NIS domainname from /etc/sysconfig/network
     Loaded: loaded (/usr/lib/systemd/system/nis-domainname.service; enabled; preset: enabled)
     Active: active (exited) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
   Main PID: 679 (code=exited, status=0/SUCCESS)
        CPU: 5ms

Jan 27 07:47:06 localhost systemd[1]: Finished Read and set NIS domainname from /etc/sysconfig/network.

○ nvmefc-boot-connections.service - Auto-connect to subsystems on FC-NVME devices found during boot
     Loaded: loaded (/usr/lib/systemd/system/nvmefc-boot-connections.service; enabled; preset: enabled)
     Active: inactive (dead)

● openvswitch.service - Open vSwitch
     Loaded: loaded (/usr/lib/systemd/system/openvswitch.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2026-01-27 08:22:26 UTC; 1h 0min ago
   Main PID: 47259 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Jan 27 08:22:26 compute-1 systemd[1]: Starting Open vSwitch...
Jan 27 08:22:26 compute-1 systemd[1]: Finished Open vSwitch.

● ovs-delete-transient-ports.service - Open vSwitch Delete Transient Ports
     Loaded: loaded (/usr/lib/systemd/system/ovs-delete-transient-ports.service; static)
     Active: active (exited) since Tue 2026-01-27 08:22:26 UTC; 1h 0min ago
   Main PID: 47195 (code=exited, status=0/SUCCESS)
        CPU: 32ms

Jan 27 08:22:26 compute-1 systemd[1]: Starting Open vSwitch Delete Transient Ports...
Jan 27 08:22:26 compute-1 systemd[1]: Finished Open vSwitch Delete Transient Ports.

● ovs-vswitchd.service - Open vSwitch Forwarding Unit
     Loaded: loaded (/usr/lib/systemd/system/ovs-vswitchd.service; static)
     Active: active (running) since Tue 2026-01-27 08:22:26 UTC; 1h 0min ago
   Main PID: 47250 (ovs-vswitchd)
         IO: 3.4M read, 24.0K written
      Tasks: 13 (limit: 48560)
     Memory: 243.0M (peak: 248.6M)
        CPU: 9.944s
     CGroup: /system.slice/ovs-vswitchd.service
             └─47250 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach

Jan 27 08:22:26 compute-1 systemd[1]: Starting Open vSwitch Forwarding Unit...
Jan 27 08:22:26 compute-1 ovs-ctl[47240]: Inserting openvswitch module [  OK  ]
Jan 27 08:22:26 compute-1 ovs-ctl[47208]: Starting ovs-vswitchd [  OK  ]
Jan 27 08:22:26 compute-1 ovs-ctl[47208]: Enabling remote OVSDB managers [  OK  ]
Jan 27 08:22:26 compute-1 systemd[1]: Started Open vSwitch Forwarding Unit.
Jan 27 08:22:26 compute-1 ovs-vsctl[47258]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-1

● ovsdb-server.service - Open vSwitch Database Unit
     Loaded: loaded (/usr/lib/systemd/system/ovsdb-server.service; static)
     Active: active (running) since Tue 2026-01-27 08:22:26 UTC; 1h 0min ago
   Main PID: 47167 (ovsdb-server)
Unit pacemaker.service could not be found.
Unit plymouth-quit-wait.service could not be found.
Unit plymouth-start.service could not be found.
         IO: 1.2M read, 228.5K written
      Tasks: 1 (limit: 48560)
     Memory: 4.7M (peak: 38.5M)
        CPU: 12.222s
     CGroup: /system.slice/ovsdb-server.service
             └─47167 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach

Jan 27 08:22:25 compute-1 chown[47114]: /usr/bin/chown: cannot access '/run/openvswitch': No such file or directory
Jan 27 08:22:26 compute-1 ovs-ctl[47119]: /etc/openvswitch/conf.db does not exist ... (warning).
Jan 27 08:22:26 compute-1 ovs-ctl[47119]: Creating empty database /etc/openvswitch/conf.db [  OK  ]
Jan 27 08:22:26 compute-1 ovs-ctl[47119]: Starting ovsdb-server [  OK  ]
Jan 27 08:22:26 compute-1 ovs-vsctl[47168]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait -- init -- set Open_vSwitch . db-version=8.5.1
Jan 27 08:22:26 compute-1 ovs-vsctl[47188]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait set Open_vSwitch . ovs-version=3.3.5-115.el9s "external-ids:system-id=\"96b682e2-5ea4-4739-b5dd-969936b1365d\"" "external-ids:rundir=\"/var/run/openvswitch\"" "system-type=\"centos\"" "system-version=\"9\""
Jan 27 08:22:26 compute-1 ovs-ctl[47119]: Configuring Open vSwitch system IDs [  OK  ]
Jan 27 08:22:26 compute-1 ovs-vsctl[47194]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-1
Jan 27 08:22:26 compute-1 ovs-ctl[47119]: Enabling remote OVSDB managers [  OK  ]
Jan 27 08:22:26 compute-1 systemd[1]: Started Open vSwitch Database Unit.

● polkit.service - Authorization Manager
     Loaded: loaded (/usr/lib/systemd/system/polkit.service; static)
     Active: active (running) since Tue 2026-01-27 08:21:35 UTC; 1h 1min ago
       Docs: man:polkit(8)
   Main PID: 43431 (polkitd)
         IO: 19.0M read, 0B written
      Tasks: 12 (limit: 48560)
     Memory: 25.3M (peak: 26.5M)
        CPU: 1.099s
     CGroup: /system.slice/polkit.service
             └─43431 /usr/lib/polkit-1/polkitd --no-debug

Jan 27 08:43:38 compute-1 polkitd[43431]: Collecting garbage unconditionally...
Jan 27 08:43:38 compute-1 polkitd[43431]: Loading rules from directory /etc/polkit-1/rules.d
Jan 27 08:43:38 compute-1 polkitd[43431]: Loading rules from directory /usr/share/polkit-1/rules.d
Jan 27 08:43:38 compute-1 polkitd[43431]: Finished loading, compiling and executing 3 rules
Jan 27 08:45:23 compute-1 polkitd[43431]: Registered Authentication Agent for unix-process:192112:350109 (system bus name :1.1832 [pkttyagent --process 192112 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Jan 27 08:45:23 compute-1 polkitd[43431]: Unregistered Authentication Agent for unix-process:192112:350109 (system bus name :1.1832, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Jan 27 08:45:23 compute-1 polkitd[43431]: Registered Authentication Agent for unix-process:192111:350109 (system bus name :1.1833 [pkttyagent --process 192111 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Jan 27 08:45:23 compute-1 polkitd[43431]: Unregistered Authentication Agent for unix-process:192111:350109 (system bus name :1.1833, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Jan 27 08:45:25 compute-1 polkitd[43431]: Registered Authentication Agent for unix-process:192578:350360 (system bus name :1.1836 [pkttyagent --process 192578 --notify-fd 5 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Jan 27 08:45:25 compute-1 polkitd[43431]: Unregistered Authentication Agent for unix-process:192578:350360 (system bus name :1.1836, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Unit power-profiles-daemon.service could not be found.
Unit rpc-svcgssd.service could not be found.

○ raid-check.service - RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.service; static)
     Active: inactive (dead)
TriggeredBy: ○ raid-check.timer

○ rbdmap.service - Map RBD devices
     Loaded: loaded (/usr/lib/systemd/system/rbdmap.service; disabled; preset: disabled)
     Active: inactive (dead)

○ rc-local.service - /etc/rc.d/rc.local Compatibility
     Loaded: loaded (/usr/lib/systemd/system/rc-local.service; static)
     Active: inactive (dead)
       Docs: man:systemd-rc-local-generator(8)

○ rescue.service - Rescue Shell
     Loaded: loaded (/usr/lib/systemd/system/rescue.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

○ rpc-gssd.service - RPC security service for NFS client and server
     Loaded: loaded (/usr/lib/systemd/system/rpc-gssd.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2026-01-27 07:47:09 UTC; 1h 35min ago
       Docs: man:rpc.gssd(8)

Jan 27 07:47:09 np0005597077.novalocal systemd[1]: RPC security service for NFS client and server was skipped because of an unmet condition check (ConditionPathExists=/etc/krb5.keytab).

● rpc-statd-notify.service - Notify NFS peers of a restart
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd-notify.service; static)
     Active: active (exited) since Tue 2026-01-27 07:47:12 UTC; 1h 35min ago
       Docs: man:sm-notify(8)
             man:rpc.statd(8)
        CPU: 11ms

Jan 27 07:47:12 np0005597077.novalocal systemd[1]: Starting Notify NFS peers of a restart...
Jan 27 07:47:12 np0005597077.novalocal sm-notify[1006]: Version 2.5.4 starting
Jan 27 07:47:12 np0005597077.novalocal systemd[1]: Started Notify NFS peers of a restart.

○ rpc-statd.service - NFS status monitor for NFSv2/3 locking.
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.statd(8)

● rpcbind.service - RPC Bind
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.service; enabled; preset: enabled)
     Active: active (running) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
TriggeredBy: ● rpcbind.socket
       Docs: man:rpcbind(8)
   Main PID: 701 (rpcbind)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 2.5M (peak: 2.8M)
        CPU: 44ms
     CGroup: /system.slice/rpcbind.service
             └─701 /usr/bin/rpcbind -w -f

Jan 27 07:47:06 localhost systemd[1]: Starting RPC Bind...
Jan 27 07:47:06 localhost systemd[1]: Started RPC Bind.

● rsyslog.service - System Logging Service
     Loaded: loaded (/usr/lib/systemd/system/rsyslog.service; enabled; preset: enabled)
     Active: active (running) since Tue 2026-01-27 07:47:12 UTC; 1h 35min ago
       Docs: man:rsyslogd(8)
             https://www.rsyslog.com/doc/
   Main PID: 1007 (rsyslogd)
         IO: 0B read, 9.8M written
      Tasks: 3 (limit: 48560)
     Memory: 11.4M (peak: 12.1M)
        CPU: 7.196s
     CGroup: /system.slice/rsyslog.service
             └─1007 /usr/sbin/rsyslogd -n

Jan 27 08:21:11 compute-1 rsyslogd[1007]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 27 08:26:12 compute-1 rsyslogd[1007]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 27 08:39:38 compute-1 rsyslogd[1007]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 27 08:46:22 compute-1 rsyslogd[1007]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 27 08:46:22 compute-1 rsyslogd[1007]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 27 08:46:22 compute-1 rsyslogd[1007]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 27 08:48:50 compute-1 rsyslogd[1007]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Unit sntp.service could not be found.
Unit sshd-keygen.service could not be found.
Jan 27 08:48:50 compute-1 rsyslogd[1007]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 27 09:08:36 compute-1 rsyslogd[1007]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 27 09:22:33 compute-1 rsyslogd[1007]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]

○ selinux-autorelabel-mark.service - Mark the need to relabel after reboot
     Loaded: loaded (/usr/lib/systemd/system/selinux-autorelabel-mark.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2026-01-27 07:47:06 UTC; 1h 35min ago

Jan 27 07:47:06 localhost systemd[1]: Mark the need to relabel after reboot was skipped because of an unmet condition check (ConditionSecurity=!selinux).

● serial-getty@ttyS0.service - Serial Getty on ttyS0
     Loaded: loaded (/usr/lib/systemd/system/serial-getty@.service; enabled-runtime; preset: disabled)
     Active: active (running) since Tue 2026-01-27 07:47:12 UTC; 1h 35min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1013 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 240.0K (peak: 720.0K)
        CPU: 10ms
     CGroup: /system.slice/system-serial\x2dgetty.slice/serial-getty@ttyS0.service
             └─1013 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

Jan 27 07:47:12 np0005597077.novalocal systemd[1]: Started Serial Getty on ttyS0.

○ sshd-keygen@ecdsa.service - OpenSSH ecdsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Tue 2026-01-27 08:43:42 UTC; 39min ago

Jan 27 07:47:07 localhost systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Jan 27 08:43:42 compute-1 systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@ed25519.service - OpenSSH ed25519 Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Tue 2026-01-27 08:43:42 UTC; 39min ago

Jan 27 07:47:07 localhost systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Jan 27 08:43:42 compute-1 systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@rsa.service - OpenSSH rsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Tue 2026-01-27 08:43:42 UTC; 39min ago

Jan 27 07:47:07 localhost systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Jan 27 08:43:42 compute-1 systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Unit syslog.service could not be found.

● sshd.service - OpenSSH server daemon
     Loaded: loaded (/usr/lib/systemd/system/sshd.service; enabled; preset: enabled)
     Active: active (running) since Tue 2026-01-27 08:43:42 UTC; 39min ago
       Docs: man:sshd(8)
             man:sshd_config(5)
   Main PID: 165764 (sshd)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 3.4M (peak: 7.0M)
        CPU: 334ms
     CGroup: /system.slice/sshd.service
             └─165764 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"

Jan 27 08:53:14 compute-1 sshd-session[223296]: Received disconnect from 45.148.10.151 port 61392:11:  [preauth]
Jan 27 08:53:14 compute-1 sshd-session[223296]: Disconnected from authenticating user root 45.148.10.151 port 61392 [preauth]
Jan 27 09:00:54 compute-1 sshd-session[227562]: Received disconnect from 91.224.92.78 port 30786:11:  [preauth]
Jan 27 09:00:54 compute-1 sshd-session[227562]: Disconnected from authenticating user root 91.224.92.78 port 30786 [preauth]
Jan 27 09:08:56 compute-1 sshd-session[230209]: Received disconnect from 91.224.92.54 port 49851:11:  [preauth]
Jan 27 09:08:56 compute-1 sshd-session[230209]: Disconnected from authenticating user root 91.224.92.54 port 49851 [preauth]
Jan 27 09:17:06 compute-1 sshd-session[232977]: Received disconnect from 45.148.10.147 port 32004:11:  [preauth]
Jan 27 09:17:06 compute-1 sshd-session[232977]: Disconnected from authenticating user root 45.148.10.147 port 32004 [preauth]
Jan 27 09:22:15 compute-1 sshd-session[234639]: Accepted publickey for zuul from 192.168.122.10 port 60520 ssh2: ECDSA SHA256:f5Z0m2dkHn65zqcIWhGOpceeRGGTJBJfAENb5pouMns
Jan 27 09:22:16 compute-1 sshd-session[234639]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)

○ sssd-kcm.service - SSSD Kerberos Cache Manager
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.service; indirect; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ● sssd-kcm.socket
       Docs: man:sssd-kcm(5)

○ sssd.service - System Security Services Daemon
     Loaded: loaded (/usr/lib/systemd/system/sssd.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2026-01-27 07:47:07 UTC; 1h 35min ago

Jan 27 07:47:07 localhost systemd[1]: System Security Services Daemon was skipped because no trigger condition checks were met.

○ sysstat-collect.service - system activity accounting tool
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-collect.timer
       Docs: man:sa1(8)

○ sysstat-summary.service - Generate a daily summary of process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-summary.timer
       Docs: man:sa2(8)

○ sysstat.service - Resets System Activity Logs
     Loaded: loaded (/usr/lib/systemd/system/sysstat.service; enabled; preset: enabled)
     Active: inactive (dead)

○ systemd-ask-password-console.service - Dispatch Password Requests to Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-console.path
       Docs: man:systemd-ask-password-console.service(8)

○ systemd-ask-password-wall.service - Forward Password Requests to Wall
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-wall.path
       Docs: man:systemd-ask-password-wall.service(8)

○ systemd-binfmt.service - Set Up Additional Binary Formats
     Loaded: loaded (/usr/lib/systemd/system/systemd-binfmt.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd-binfmt.service(8)
             man:binfmt.d(5)
             https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Jan 27 07:47:06 localhost systemd[1]: Set Up Additional Binary Formats was skipped because no trigger condition checks were met.

○ systemd-boot-random-seed.service - Update Boot Loader Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-random-seed.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd-boot-random-seed.service(8)
             man:random(4)

Jan 27 07:47:06 localhost systemd[1]: Update Boot Loader Random Seed was skipped because no trigger condition checks were met.

● systemd-boot-update.service - Automatic Boot Loader Update
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-update.service; enabled; preset: enabled)
     Active: active (exited) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:bootctl(1)
   Main PID: 697 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Jan 27 07:47:06 localhost systemd[1]: Starting Automatic Boot Loader Update...
Jan 27 07:47:06 localhost bootctl[697]: Couldn't find EFI system partition, skipping.
Jan 27 07:47:06 localhost systemd[1]: Finished Automatic Boot Loader Update.

○ systemd-firstboot.service - First Boot Wizard
     Loaded: loaded (/usr/lib/systemd/system/systemd-firstboot.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd-firstboot(1)

Jan 27 07:47:06 localhost systemd[1]: First Boot Wizard was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ systemd-fsck-root.service - File System Check on Root Device
     Loaded: loaded (/usr/lib/systemd/system/systemd-fsck-root.service; static)
     Active: inactive (dead) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
   Duration: 2.052s
       Docs: man:systemd-fsck-root.service(8)
   Main PID: 554 (code=exited, status=0/SUCCESS)
        CPU: 18ms

Jan 27 07:47:04 localhost systemd[1]: Starting File System Check on /dev/disk/by-uuid/22ac9141-3960-4912-b20e-19fc8a328d40...
Jan 27 07:47:04 localhost systemd-fsck[556]: /usr/sbin/fsck.xfs: XFS file system.
Jan 27 07:47:04 localhost systemd[1]: Finished File System Check on /dev/disk/by-uuid/22ac9141-3960-4912-b20e-19fc8a328d40.

● systemd-hostnamed.service - Hostname Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-hostnamed.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-hostnamed.service.d
             └─disable-privatedevices.conf
     Active: active (running) since Tue 2026-01-27 09:22:37 UTC; 21s ago
       Docs: man:systemd-hostnamed.service(8)
             man:hostname(5)
             man:machine-info(5)
             man:org.freedesktop.resolve1(5)
   Main PID: 237147 (systemd-hostnam)
         IO: 8.0K read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 2.7M (peak: 3.8M)
        CPU: 106ms
     CGroup: /system.slice/systemd-hostnamed.service
             └─237147 /usr/lib/systemd/systemd-hostnamed

Jan 27 09:22:37 compute-1 systemd[1]: Starting Hostname Service...
Jan 27 09:22:37 compute-1 systemd[1]: Started Hostname Service.

● systemd-hwdb-update.service - Rebuild Hardware Database
     Loaded: loaded (/usr/lib/systemd/system/systemd-hwdb-update.service; static)
     Active: active (exited) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
       Docs: man:hwdb(7)
             man:systemd-hwdb(8)
   Main PID: 689 (code=exited, status=0/SUCCESS)
        CPU: 589ms

Jan 27 07:47:06 localhost systemd[1]: Starting Rebuild Hardware Database...
Jan 27 07:47:07 localhost systemd[1]: Finished Rebuild Hardware Database.

○ systemd-initctl.service - initctl Compatibility Daemon
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-initctl.socket
       Docs: man:systemd-initctl.service(8)

● systemd-journal-catalog-update.service - Rebuild Journal Catalog
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-catalog-update.service; static)
     Active: active (exited) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 702 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Jan 27 07:47:06 localhost systemd[1]: Starting Rebuild Journal Catalog...
Jan 27 07:47:06 localhost systemd[1]: Finished Rebuild Journal Catalog.

● systemd-journal-flush.service - Flush Journal to Persistent Storage
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-flush.service; static)
     Active: active (exited) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 690 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Jan 27 07:47:06 localhost systemd[1]: Starting Flush Journal to Persistent Storage...
Jan 27 07:47:06 localhost systemd[1]: Finished Flush Journal to Persistent Storage.

● systemd-journald.service - Journal Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.service; static)
     Active: active (running) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
TriggeredBy: ● systemd-journald-dev-log.socket
             ● systemd-journald.socket
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 680 (systemd-journal)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 48.0M (peak: 55.9M)
        CPU: 8.094s
     CGroup: /system.slice/systemd-journald.service
             └─680 /usr/lib/systemd/systemd-journald

Jan 27 07:47:06 localhost systemd-journald[680]: Journal started
Jan 27 07:47:06 localhost systemd-journald[680]: Runtime Journal (/run/log/journal/85ac68c10a6e7ae08ceb898dbdca0cb5) is 8.0M, max 153.6M, 145.6M free.
Jan 27 07:47:06 localhost systemd[1]: systemd-journald.service: Deactivated successfully.
Jan 27 07:47:06 localhost systemd-journald[680]: Runtime Journal (/run/log/journal/85ac68c10a6e7ae08ceb898dbdca0cb5) is 8.0M, max 153.6M, 145.6M free.
Jan 27 07:47:06 localhost systemd-journald[680]: Received client request to flush runtime journal.

● systemd-logind.service - User Login Management
     Loaded: loaded (/usr/lib/systemd/system/systemd-logind.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-logind.service.d
             └─10-grub2-logind-service.conf
     Active: active (running) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
       Docs: man:sd-login(3)
             man:systemd-logind.service(8)
             man:logind.conf(5)
             man:org.freedesktop.login1(5)
   Main PID: 806 (systemd-logind)
     Status: "Processing requests..."
         IO: 4.0K read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 6.3M (peak: 7.4M)
        CPU: 2.513s
     CGroup: /system.slice/systemd-logind.service
             └─806 /usr/lib/systemd/systemd-logind

Jan 27 08:45:56 compute-1 systemd-logind[806]: Removed session 49.
Jan 27 08:46:01 compute-1 systemd-logind[806]: New session 50 of user zuul.
Jan 27 08:47:02 compute-1 systemd-logind[806]: Watching system buttons on /dev/input/event0 (Power Button)
Jan 27 08:47:02 compute-1 systemd-logind[806]: Watching system buttons on /dev/input/event1 (AT Translated Set 2 keyboard)
Jan 27 08:48:08 compute-1 systemd-logind[806]: New session 51 of user zuul.
Jan 27 08:48:08 compute-1 systemd-logind[806]: Session 51 logged out. Waiting for processes to exit.
Jan 27 08:48:08 compute-1 systemd-logind[806]: Removed session 51.
Jan 27 08:48:56 compute-1 systemd-logind[806]: Session 50 logged out. Waiting for processes to exit.
Jan 27 08:48:56 compute-1 systemd-logind[806]: Removed session 50.
Jan 27 09:22:15 compute-1 systemd-logind[806]: New session 52 of user zuul.

○ systemd-machine-id-commit.service - Commit a transient machine-id on disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-machine-id-commit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
Unit systemd-networkd-wait-online.service could not be found.
       Docs: man:systemd-machine-id-commit.service(8)

Jan 27 07:47:06 localhost systemd[1]: Commit a transient machine-id on disk was skipped because of an unmet condition check (ConditionPathIsMountPoint=/etc/machine-id).

● systemd-machined.service - Virtual Machine and Container Registration Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-machined.service; static)
     Active: active (running) since Tue 2026-01-27 08:45:16 UTC; 37min ago
       Docs: man:systemd-machined.service(8)
             man:org.freedesktop.machine1(5)
   Main PID: 190815 (systemd-machine)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 1.4M (peak: 1.9M)
        CPU: 670ms
     CGroup: /system.slice/systemd-machined.service
             └─190815 /usr/lib/systemd/systemd-machined

Jan 27 08:45:16 compute-1 systemd[1]: Starting Virtual Machine and Container Registration Service...
Jan 27 08:45:16 compute-1 systemd[1]: Started Virtual Machine and Container Registration Service.

● systemd-modules-load.service - Load Kernel Modules
     Loaded: loaded (/usr/lib/systemd/system/systemd-modules-load.service; static)
     Active: active (exited) since Tue 2026-01-27 08:46:57 UTC; 36min ago
       Docs: man:systemd-modules-load.service(8)
             man:modules-load.d(5)
   Main PID: 205289 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Jan 27 08:46:57 compute-1 systemd[1]: Starting Load Kernel Modules...
Jan 27 08:46:57 compute-1 systemd[1]: Finished Load Kernel Modules.

● systemd-network-generator.service - Generate network units from Kernel command line
     Loaded: loaded (/usr/lib/systemd/system/systemd-network-generator.service; enabled; preset: enabled)
     Active: active (exited) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd-network-generator.service(8)
   Main PID: 681 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Jan 27 07:47:06 localhost systemd[1]: Finished Generate network units from Kernel command line.

○ systemd-pcrmachine.service - TPM2 PCR Machine ID Measurement
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrmachine.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd-pcrmachine.service(8)

○ systemd-pcrphase-initrd.service - TPM2 PCR Barrier (initrd)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-initrd.service; static)
     Active: inactive (dead)
       Docs: man:systemd-pcrphase-initrd.service(8)

○ systemd-pcrphase-sysinit.service - TPM2 PCR Barrier (Initialization)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-sysinit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
       Docs: man:systemd-pcrphase-sysinit.service(8)

Jan 27 07:47:07 localhost systemd[1]: TPM2 PCR Barrier (Initialization) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pcrphase.service - TPM2 PCR Barrier (User)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2026-01-27 07:47:09 UTC; 1h 35min ago
       Docs: man:systemd-pcrphase.service(8)

Jan 27 07:47:09 np0005597077.novalocal systemd[1]: TPM2 PCR Barrier (User) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pstore.service - Platform Persistent Storage Archival
     Loaded: loaded (/usr/lib/systemd/system/systemd-pstore.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd-pstore(8)

Jan 27 07:47:06 localhost systemd[1]: Platform Persistent Storage Archival was skipped because of an unmet condition check (ConditionDirectoryNotEmpty=/sys/fs/pstore).
Unit systemd-timesyncd.service could not be found.

● systemd-random-seed.service - Load/Save OS Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-random-seed.service; static)
     Active: active (exited) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd-random-seed.service(8)
             man:random(4)
   Main PID: 691 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Jan 27 07:47:06 localhost systemd[1]: Starting Load/Save OS Random Seed...
Jan 27 07:47:06 localhost systemd[1]: Finished Load/Save OS Random Seed.

● systemd-remount-fs.service - Remount Root and Kernel File Systems
     Loaded: loaded (/usr/lib/systemd/system/systemd-remount-fs.service; enabled-runtime; preset: disabled)
     Active: active (exited) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd-remount-fs.service(8)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
   Main PID: 682 (code=exited, status=0/SUCCESS)
        CPU: 22ms

Jan 27 07:47:06 localhost systemd[1]: Finished Remount Root and Kernel File Systems.

○ systemd-repart.service - Repartition Root Disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-repart.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd-repart.service(8)

○ systemd-rfkill.service - Load/Save RF Kill Switch Status
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-rfkill.socket
       Docs: man:systemd-rfkill.service(8)

● systemd-sysctl.service - Apply Kernel Variables
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysctl.service; static)
     Active: active (exited) since Tue 2026-01-27 08:21:49 UTC; 1h 1min ago
       Docs: man:systemd-sysctl.service(8)
             man:sysctl.d(5)
   Main PID: 44918 (code=exited, status=0/SUCCESS)
        CPU: 26ms

Jan 27 08:21:49 compute-1 systemd[1]: Starting Apply Kernel Variables...
Jan 27 08:21:49 compute-1 systemd[1]: Finished Apply Kernel Variables.

○ systemd-sysext.service - Merge System Extension Images into /usr/ and /opt/
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysext.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd-sysext.service(8)

● systemd-sysusers.service - Create System Users
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysusers.service; static)
     Active: active (exited) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:sysusers.d(5)
             man:systemd-sysusers.service(8)
   Main PID: 692 (code=exited, status=0/SUCCESS)
        CPU: 19ms

Jan 27 07:47:06 localhost systemd[1]: Starting Create System Users...
Jan 27 07:47:06 localhost systemd[1]: Finished Create System Users.

○ systemd-tmpfiles-clean.service - Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.service; static)
     Active: inactive (dead) since Tue 2026-01-27 08:02:13 UTC; 1h 20min ago
TriggeredBy: ● systemd-tmpfiles-clean.timer
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 29964 (code=exited, status=0/SUCCESS)
        CPU: 41ms

Jan 27 08:02:13 compute-1 systemd[1]: Starting Cleanup of Temporary Directories...
Jan 27 08:02:13 compute-1 systemd[1]: systemd-tmpfiles-clean.service: Deactivated successfully.
Jan 27 08:02:13 compute-1 systemd[1]: Finished Cleanup of Temporary Directories.

● systemd-tmpfiles-setup-dev.service - Create Static Device Nodes in /dev
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup-dev.service; static)
     Active: active (exited) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 694 (code=exited, status=0/SUCCESS)
        CPU: 43ms

Jan 27 07:47:06 localhost systemd[1]: Starting Create Static Device Nodes in /dev...
Jan 27 07:47:06 localhost systemd[1]: Finished Create Static Device Nodes in /dev.

Unit systemd-tmpfiles.service could not be found.
● systemd-tmpfiles-setup.service - Create Volatile Files and Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup.service; static)
     Active: active (exited) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 698 (code=exited, status=0/SUCCESS)
        CPU: 101ms

Jan 27 07:47:06 localhost systemd[1]: Starting Create Volatile Files and Directories...
Jan 27 07:47:06 localhost systemd[1]: Finished Create Volatile Files and Directories.

● systemd-udev-settle.service - Wait for udev To Complete Device Initialization
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-settle.service; static)
     Active: active (exited) since Tue 2026-01-27 08:46:52 UTC; 36min ago
       Docs: man:systemd-udev-settle.service(8)
   Main PID: 204383 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Jan 27 08:46:52 compute-1 systemd[1]: Starting Wait for udev To Complete Device Initialization...
Jan 27 08:46:52 compute-1 udevadm[204383]: systemd-udev-settle.service is deprecated. Please fix multipathd.service not to pull it in.
Jan 27 08:46:52 compute-1 systemd[1]: Finished Wait for udev To Complete Device Initialization.

● systemd-udev-trigger.service - Coldplug All udev Devices
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-trigger.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-udev-trigger.service.d
             └─systemd-udev-trigger-no-reload.conf
     Active: active (exited) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:udev(7)
             man:systemd-udevd.service(8)
   Main PID: 686 (code=exited, status=0/SUCCESS)
        CPU: 98ms

Jan 27 07:47:06 localhost systemd[1]: Finished Coldplug All udev Devices.

● systemd-udevd.service - Rule-based Manager for Device Events and Files
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd.service; static)
     Active: active (running) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
TriggeredBy: ● systemd-udevd-control.socket
             ● systemd-udevd-kernel.socket
       Docs: man:systemd-udevd.service(8)
             man:udev(7)
   Main PID: 731 (systemd-udevd)
     Status: "Processing with 32 children at max"
         IO: 43.7M read, 29.3M written
      Tasks: 1
     Memory: 25.9M (peak: 93.2M)
        CPU: 4.566s
     CGroup: /system.slice/systemd-udevd.service
             └─udev
               └─731 /usr/lib/systemd/systemd-udevd

Jan 27 08:26:34 compute-1 lvm[72233]: VG ceph_vg0 finished
Jan 27 08:26:37 compute-1 lvm[72475]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Jan 27 08:26:37 compute-1 lvm[72475]: VG ceph_vg0 finished
Jan 27 08:29:17 compute-1 lvm[77238]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Jan 27 08:29:17 compute-1 lvm[77238]: VG ceph_vg0 finished
Jan 27 08:39:38 compute-1 systemd-udevd[130434]: Network interface NamePolicy= disabled on kernel command line.
Jan 27 08:47:02 compute-1 lvm[205576]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Jan 27 08:47:02 compute-1 lvm[205576]: VG ceph_vg0 finished
Jan 27 09:22:25 compute-1 lvm[235362]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Jan 27 09:22:25 compute-1 lvm[235362]: VG ceph_vg0 finished

● systemd-update-done.service - Update is Completed
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-done.service; static)
     Active: active (exited) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
       Docs: man:systemd-update-done.service(8)
   Main PID: 732 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Jan 27 07:47:07 localhost systemd[1]: Starting Update is Completed...
Jan 27 07:47:07 localhost systemd[1]: Finished Update is Completed.

○ systemd-update-utmp-runlevel.service - Record Runlevel Change in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp-runlevel.service; static)
     Active: inactive (dead) since Tue 2026-01-27 07:47:12 UTC; 1h 35min ago
       Docs: man:systemd-update-utmp-runlevel.service(8)
             man:utmp(5)
   Main PID: 1018 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Unit tlp.service could not be found.
Jan 27 07:47:12 np0005597077.novalocal systemd[1]: Starting Record Runlevel Change in UTMP...
Jan 27 07:47:12 np0005597077.novalocal systemd[1]: systemd-update-utmp-runlevel.service: Deactivated successfully.
Jan 27 07:47:12 np0005597077.novalocal systemd[1]: Finished Record Runlevel Change in UTMP.

● systemd-update-utmp.service - Record System Boot/Shutdown in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp.service; static)
     Active: active (exited) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd-update-utmp.service(8)
             man:utmp(5)
   Main PID: 730 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Jan 27 07:47:06 localhost systemd[1]: Starting Record System Boot/Shutdown in UTMP...
Jan 27 07:47:06 localhost systemd[1]: Finished Record System Boot/Shutdown in UTMP.

● systemd-user-sessions.service - Permit User Sessions
     Loaded: loaded (/usr/lib/systemd/system/systemd-user-sessions.service; static)
     Active: active (exited) since Tue 2026-01-27 07:47:12 UTC; 1h 35min ago
       Docs: man:systemd-user-sessions.service(8)
   Main PID: 1009 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Jan 27 07:47:12 np0005597077.novalocal systemd[1]: Starting Permit User Sessions...
Jan 27 07:47:12 np0005597077.novalocal systemd[1]: Finished Permit User Sessions.

○ systemd-vconsole-setup.service - Setup Virtual Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-vconsole-setup.service; static)
     Active: inactive (dead) since Tue 2026-01-27 07:47:05 UTC; 1h 35min ago
   Duration: 1.781s
       Docs: man:systemd-vconsole-setup.service(8)
             man:vconsole.conf(5)
   Main PID: 314 (code=exited, status=0/SUCCESS)
        CPU: 182ms

Jan 27 07:47:03 localhost systemd[1]: Finished Setup Virtual Console.
Jan 27 07:47:05 localhost systemd[1]: systemd-vconsole-setup.service: Deactivated successfully.
Jan 27 07:47:05 localhost systemd[1]: Stopped Setup Virtual Console.
Notice: journal has been rotated since unit was started, output may be incomplete.

● tuned.service - Dynamic System Tuning Daemon
     Loaded: loaded (/usr/lib/systemd/system/tuned.service; enabled; preset: enabled)
     Active: active (running) since Tue 2026-01-27 08:34:11 UTC; 48min ago
       Docs: man:tuned(8)
             man:tuned.conf(5)
             man:tuned-adm(8)
   Main PID: 92931 (tuned)
         IO: 0B read, 0B written
      Tasks: 4 (limit: 48560)
     Memory: 13.7M (peak: 14.1M)
        CPU: 903ms
     CGroup: /system.slice/tuned.service
             └─92931 /usr/bin/python3 -Es /usr/sbin/tuned -l -P

Jan 27 08:34:11 compute-1 systemd[1]: Starting Dynamic System Tuning Daemon...
Jan 27 08:34:11 compute-1 systemd[1]: Started Dynamic System Tuning Daemon.

○ unbound-anchor.service - update of the root trust anchor for DNSSEC validation in unbound
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.service; static)
     Active: inactive (dead)
TriggeredBy: ● unbound-anchor.timer
       Docs: man:unbound-anchor(8)

● user-runtime-dir@1000.service - User Runtime Directory /run/user/1000
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Tue 2026-01-27 07:47:29 UTC; 1h 35min ago
       Docs: man:user@.service(5)
   Main PID: 4306 (code=exited, status=0/SUCCESS)
        CPU: 26ms

Jan 27 07:47:29 np0005597077.novalocal systemd[1]: Starting User Runtime Directory /run/user/1000...
Jan 27 07:47:29 np0005597077.novalocal systemd[1]: Finished User Runtime Directory /run/user/1000.

● user-runtime-dir@42477.service - User Runtime Directory /run/user/42477
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Tue 2026-01-27 08:28:46 UTC; 54min ago
       Docs: man:user@.service(5)
   Main PID: 72550 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Jan 27 08:28:46 compute-1 systemd[1]: Starting User Runtime Directory /run/user/42477...
Jan 27 08:28:46 compute-1 systemd[1]: Finished User Runtime Directory /run/user/42477.

● user@1000.service - User Manager for UID 1000
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Tue 2026-01-27 07:47:30 UTC; 1h 35min ago
       Docs: man:user@.service(5)
   Main PID: 4307 (systemd)
     Status: "Ready."
         IO: 676.0K read, 4.0K written
      Tasks: 5
     Memory: 9.1M (peak: 14.9M)
        CPU: 783ms
     CGroup: /user.slice/user-1000.slice/user@1000.service
             ├─app.slice
             │ └─dbus-broker.service
             │   ├─29571 /usr/bin/dbus-broker-launch --scope user
             │   └─29572 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             ├─init.scope
             │ ├─4307 /usr/lib/systemd/systemd --user
             │ └─4309 "(sd-pam)"
             └─user.slice
               └─podman-pause-444cdfba.scope
                 └─29556 catatonit -P

Jan 27 07:59:09 np0005597077.novalocal dbus-broker-launch[29571]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +31: Eavesdropping is deprecated and ignored
Jan 27 07:59:09 np0005597077.novalocal dbus-broker-launch[29571]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +33: Eavesdropping is deprecated and ignored
Jan 27 07:59:09 np0005597077.novalocal systemd[4307]: Started D-Bus User Message Bus.
Jan 27 07:59:09 np0005597077.novalocal dbus-broker-lau[29571]: Ready
Jan 27 07:59:09 np0005597077.novalocal systemd[4307]: selinux: avc:  op=load_policy lsm=selinux seqno=4 res=1
Jan 27 07:59:09 np0005597077.novalocal systemd[4307]: Created slice Slice /user.
Jan 27 07:59:09 np0005597077.novalocal systemd[4307]: podman-29551.scope: unit configures an IP firewall, but not running as root.
Jan 27 07:59:09 np0005597077.novalocal systemd[4307]: (This warning is only shown for the first unit using IP firewalling.)
Jan 27 07:59:09 np0005597077.novalocal systemd[4307]: Started podman-29551.scope.
Jan 27 07:59:10 np0005597077.novalocal systemd[4307]: Started podman-pause-444cdfba.scope.

● user@42477.service - User Manager for UID 42477
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Tue 2026-01-27 08:28:46 UTC; 54min ago
       Docs: man:user@.service(5)
   Main PID: 72551 (systemd)
     Status: "Ready."
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 8.6M (peak: 12.2M)
        CPU: 438ms
     CGroup: /user.slice/user-42477.slice/user@42477.service
             └─init.scope
               ├─72551 /usr/lib/systemd/systemd --user
               └─72553 "(sd-pam)"

Jan 27 08:28:46 compute-1 systemd[72551]: Finished Create User's Volatile Files and Directories.
Jan 27 08:28:46 compute-1 systemd[72551]: Reached target Basic System.
Jan 27 08:28:46 compute-1 systemd[72551]: Reached target Main User Target.
Jan 27 08:28:46 compute-1 systemd[72551]: Startup finished in 108ms.
Jan 27 08:28:46 compute-1 systemd[1]: Started User Manager for UID 42477.
Jan 27 08:30:49 compute-1 systemd[72551]: Starting Mark boot as successful...
Jan 27 08:30:49 compute-1 systemd[72551]: Finished Mark boot as successful.
Jan 27 08:34:13 compute-1 systemd[72551]: Created slice User Background Tasks Slice.
Jan 27 08:34:13 compute-1 systemd[72551]: Starting Cleanup of User's Temporary Files and Directories...
Jan 27 08:34:13 compute-1 systemd[72551]: Finished Cleanup of User's Temporary Files and Directories.

○ virtinterfaced.service - libvirt interface daemon
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtinterfaced.socket
             ○ virtinterfaced-admin.socket
             ○ virtinterfaced-ro.socket
       Docs: man:virtinterfaced(8)
             https://libvirt.org/

○ virtlockd.service - libvirt locking daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtlockd-admin.socket
             ● virtlockd.socket
       Docs: man:virtlockd(8)
             https://libvirt.org/

● virtlogd.service - libvirt logging daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.service; enabled; preset: disabled)
     Active: active (running) since Tue 2026-01-27 08:45:13 UTC; 37min ago
TriggeredBy: ● virtlogd-admin.socket
             ● virtlogd.socket
       Docs: man:virtlogd(8)
             https://libvirt.org/
   Main PID: 190186 (virtlogd)
         IO: 644.0K read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 3.1M (peak: 3.3M)
        CPU: 81ms
     CGroup: /system.slice/virtlogd.service
             └─190186 /usr/sbin/virtlogd

Jan 27 08:45:13 compute-1 systemd[1]: Starting libvirt logging daemon...
Jan 27 08:45:13 compute-1 systemd[1]: Started libvirt logging daemon.

○ virtnetworkd.service - libvirt network daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnetworkd.socket
             ○ virtnetworkd-ro.socket
             ○ virtnetworkd-admin.socket
       Docs: man:virtnetworkd(8)
             https://libvirt.org/

● virtnodedevd.service - libvirt nodedev daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.service; enabled; preset: disabled)
     Active: active (running) since Tue 2026-01-27 08:49:06 UTC; 33min ago
TriggeredBy: ● virtnodedevd-admin.socket
             ● virtnodedevd.socket
             ● virtnodedevd-ro.socket
       Docs: man:virtnodedevd(8)
             https://libvirt.org/
   Main PID: 222034 (virtnodedevd)
         IO: 4.3M read, 0B written
      Tasks: 20 (limit: 48560)
     Memory: 9.8M (peak: 11.2M)
        CPU: 1.993s
     CGroup: /system.slice/virtnodedevd.service
             └─222034 /usr/sbin/virtnodedevd --timeout 120

Jan 27 08:49:06 compute-1 systemd[1]: Starting libvirt nodedev daemon...
Jan 27 08:49:06 compute-1 systemd[1]: Started libvirt nodedev daemon.

○ virtnwfilterd.service - libvirt nwfilter daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnwfilterd.socket
             ○ virtnwfilterd-admin.socket
             ○ virtnwfilterd-ro.socket
       Docs: man:virtnwfilterd(8)
             https://libvirt.org/

○ virtproxyd.service - libvirt proxy daemon
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.service; enabled; preset: disabled)
     Active: inactive (dead) since Tue 2026-01-27 08:47:15 UTC; 35min ago
   Duration: 2min 14ms
TriggeredBy: ● virtproxyd.socket
             ● virtproxyd-ro.socket
             ● virtproxyd-admin.socket
             ● virtproxyd-tls.socket
       Docs: man:virtproxyd(8)
             https://libvirt.org/
   Main PID: 190596 (code=exited, status=0/SUCCESS)
        CPU: 55ms

Jan 27 08:45:15 compute-1 systemd[1]: Starting libvirt proxy daemon...
Jan 27 08:45:15 compute-1 systemd[1]: Started libvirt proxy daemon.
Jan 27 08:47:15 compute-1 systemd[1]: virtproxyd.service: Deactivated successfully.

● virtqemud.service - libvirt QEMU daemon
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.service; enabled; preset: enabled)
     Active: active (running) since Tue 2026-01-27 08:49:04 UTC; 33min ago
TriggeredBy: ● virtqemud-admin.socket
             ● virtqemud-ro.socket
             ● virtqemud.socket
       Docs: man:virtqemud(8)
             https://libvirt.org/
   Main PID: 221900 (virtqemud)
         IO: 48.2M read, 188.0K written
      Tasks: 18 (limit: 32768)
     Memory: 63.9M (peak: 93.4M)
        CPU: 2.534s
     CGroup: /system.slice/virtqemud.service
             └─221900 /usr/sbin/virtqemud --timeout 120

Jan 27 08:49:04 compute-1 systemd[1]: Starting libvirt QEMU daemon...
Jan 27 08:49:04 compute-1 systemd[1]: Started libvirt QEMU daemon.
Unit ypbind.service could not be found.
Unit yppasswdd.service could not be found.
Unit ypserv.service could not be found.
Unit ypxfrd.service could not be found.
Jan 27 09:22:25 compute-1 virtqemud[221900]: libvirt version: 11.10.0, package: 2.el9 (builder@centos.org, 2025-12-18-15:09:54, )
Jan 27 09:22:25 compute-1 virtqemud[221900]: hostname: compute-1
Jan 27 09:22:25 compute-1 virtqemud[221900]: Failed to connect socket to '/var/run/libvirt/virtnetworkd-sock-ro': No such file or directory
Jan 27 09:22:25 compute-1 virtqemud[221900]: Failed to connect socket to '/var/run/libvirt/virtnwfilterd-sock-ro': No such file or directory
Jan 27 09:22:25 compute-1 virtqemud[221900]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory

○ virtsecretd.service - libvirt secret daemon
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.service; enabled; preset: disabled)
     Active: inactive (dead) since Tue 2026-01-27 08:47:26 UTC; 35min ago
   Duration: 2min 7.916s
TriggeredBy: ● virtsecretd-admin.socket
             ● virtsecretd.socket
             ● virtsecretd-ro.socket
       Docs: man:virtsecretd(8)
             https://libvirt.org/
   Main PID: 191032 (code=exited, status=0/SUCCESS)
        CPU: 70ms

Jan 27 08:45:18 compute-1 systemd[1]: Starting libvirt secret daemon...
Jan 27 08:45:18 compute-1 systemd[1]: Started libvirt secret daemon.
Jan 27 08:47:26 compute-1 systemd[1]: virtsecretd.service: Deactivated successfully.

○ virtstoraged.service - libvirt storage daemon
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtstoraged-ro.socket
             ○ virtstoraged.socket
             ○ virtstoraged-admin.socket
       Docs: man:virtstoraged(8)
             https://libvirt.org/

● -.slice - Root Slice
     Loaded: loaded
     Active: active since Tue 2026-01-27 07:47:02 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:02 UTC; 1h 35min ago
       Docs: man:systemd.special(7)
      Tasks: 1120
     Memory: 2.7G
        CPU: 28min 54.890s
     CGroup: /
             ├─240109 turbostat --debug sleep 10
             ├─240112 sleep 10
             ├─init.scope
             │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
             ├─machine.slice
             │ ├─libpod-a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a.scope
             │ │ └─container
             │ │   ├─221468 dumb-init --single-child -- kolla_start
             │ │   └─221470 /usr/bin/python3 /usr/bin/nova-compute
             │ ├─libpod-d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1.scope
             │ │ └─container
             │ │   ├─139975 dumb-init --single-child -- kolla_start
             │ │   ├─139978 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │ │   ├─140319 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │ │   └─140418 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpm4k7fsyh/privsep.sock
             │ └─libpod-f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d.scope
             │   └─container
             │     ├─130303 dumb-init --single-child -- kolla_start
             │     └─130306 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
             ├─system.slice
             │ ├─NetworkManager.service
             │ │ └─48943 /usr/sbin/NetworkManager --no-daemon
             │ ├─auditd.service
             │ │ ├─703 /sbin/auditd
             │ │ └─705 /usr/sbin/sedispatch
             │ ├─chronyd.service
             │ │ └─58510 /usr/sbin/chronyd -F 2
             │ ├─crond.service
             │ │ └─1011 /usr/sbin/crond -n
             │ ├─dbus-broker.service
             │ │ ├─744 /usr/bin/dbus-broker-launch --scope system --audit
             │ │ └─773 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             │ ├─edpm_nova_compute.service
             │ │ └─221466 /usr/bin/conmon --api-version 1 -c a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a -u a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a/userdata -p /run/containers/storage/overlay-containers/a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a
             │ ├─edpm_ovn_controller.service
             │ │ └─130301 /usr/bin/conmon --api-version 1 -c f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d -u f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d/userdata -p /run/containers/storage/overlay-containers/f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d
             │ ├─edpm_ovn_metadata_agent.service
             │ │ └─139973 /usr/bin/conmon --api-version 1 -c d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1 -u d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1/userdata -p /run/containers/storage/overlay-containers/d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1
             │ ├─gssproxy.service
             │ │ └─872 /usr/sbin/gssproxy -D
             │ ├─irqbalance.service
             │ │ └─788 /usr/sbin/irqbalance
             │ ├─iscsid.service
             │ │ └─206933 /usr/sbin/iscsid -f
             │ ├─multipathd.service
             │ │ └─207091 /sbin/multipathd -d -s
             │ ├─ovs-vswitchd.service
             │ │ └─47250 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             │ ├─ovsdb-server.service
             │ │ └─47167 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             │ ├─polkit.service
             │ │ └─43431 /usr/lib/polkit-1/polkitd --no-debug
             │ ├─rpcbind.service
             │ │ └─701 /usr/bin/rpcbind -w -f
             │ ├─rsyslog.service
             │ │ └─1007 /usr/sbin/rsyslogd -n
             │ ├─sshd.service
             │ │ └─165764 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             │ ├─system-ceph\x2d281e9bde\x2d2795\x2d59f4\x2d98ac\x2d90cf5b49a2de.slice
             │ │ ├─ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@crash.compute-1.service
             │ │ │ ├─libpod-payload-8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c
             │ │ │ │ ├─76982 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-1
             │ │ │ │ └─76984 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-1
             │ │ │ └─runtime
             │ │ │   └─76980 /usr/bin/conmon --api-version 1 -c 8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c -u 8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c/userdata -p /run/containers/storage/overlay-containers/8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-crash-compute-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@crash.compute-1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c
             │ │ ├─ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mds.cephfs.compute-1.taxacd.service
             │ │ │ ├─libpod-payload-3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211
             │ │ │ │ ├─84857 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-1.taxacd -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─84859 /usr/bin/ceph-mds -n mds.cephfs.compute-1.taxacd -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─84855 /usr/bin/conmon --api-version 1 -c 3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211 -u 3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211/userdata -p /run/containers/storage/overlay-containers/3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-mds-cephfs-compute-1-taxacd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mds.cephfs.compute-1.taxacd.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211
             │ │ ├─ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mgr.compute-1.jqbgxp.service
             │ │ │ ├─libpod-payload-2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d
             │ │ │ │ ├─81955 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-1.jqbgxp -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─81957 /usr/bin/ceph-mgr -n mgr.compute-1.jqbgxp -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─81953 /usr/bin/conmon --api-version 1 -c 2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d -u 2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d/userdata -p /run/containers/storage/overlay-containers/2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-mgr-compute-1-jqbgxp --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mgr.compute-1.jqbgxp.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d
             │ │ ├─ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mon.compute-1.service
             │ │ │ ├─libpod-payload-251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd
             │ │ │ │ ├─81594 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ │ └─81596 /usr/bin/ceph-mon -n mon.compute-1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─81592 /usr/bin/conmon --api-version 1 -c 251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd -u 251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd/userdata -p /run/containers/storage/overlay-containers/251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-mon-compute-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mon.compute-1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd
             │ │ ├─ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@osd.1.service
             │ │ │ ├─libpod-payload-6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef
             │ │ │ │ ├─78931 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─78933 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─78929 /usr/bin/conmon --api-version 1 -c 6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef -u 6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef/userdata -p /run/containers/storage/overlay-containers/6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef
             │ │ └─ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@rgw.rgw.compute-1.nigpsg.service
             │ │   ├─libpod-payload-39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249
             │ │   │ ├─83598 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-1.nigpsg -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │   │ └─83600 /usr/bin/radosgw -n client.rgw.rgw.compute-1.nigpsg -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │   └─runtime
             │ │     └─83596 /usr/bin/conmon --api-version 1 -c 39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249 -u 39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249/userdata -p /run/containers/storage/overlay-containers/39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-rgw-rgw-compute-1-nigpsg --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@rgw.rgw.compute-1.nigpsg.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249
             │ ├─system-getty.slice
             │ │ └─getty@tty1.service
             │ │   └─1012 /sbin/agetty -o "-p -- \\u" --noclear - linux
             │ ├─system-serial\x2dgetty.slice
             │ │ └─serial-getty@ttyS0.service
             │ │   └─1013 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             │ ├─systemd-hostnamed.service
             │ │ └─237147 /usr/lib/systemd/systemd-hostnamed
             │ ├─systemd-journald.service
             │ │ └─680 /usr/lib/systemd/systemd-journald
             │ ├─systemd-logind.service
             │ │ └─806 /usr/lib/systemd/systemd-logind
             │ ├─systemd-machined.service
             │ │ └─190815 /usr/lib/systemd/systemd-machined
             │ ├─systemd-udevd.service
             │ │ └─udev
             │ │   └─731 /usr/lib/systemd/systemd-udevd
             │ ├─tuned.service
             │ │ ├─ 92931 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             │ │ └─240595 /usr/bin/sh - /usr/sbin/virt-what
             │ ├─virtlogd.service
             │ │ └─190186 /usr/sbin/virtlogd
             │ ├─virtnodedevd.service
             │ │ └─222034 /usr/sbin/virtnodedevd --timeout 120
             │ └─virtqemud.service
             │   └─221900 /usr/sbin/virtqemud --timeout 120
             └─user.slice
               ├─user-1000.slice
               │ ├─session-1.scope
               │ │ └─4519 /usr/bin/python3
               │ ├─session-52.scope
               │ │ ├─234639 "sshd-session: zuul [priv]"
               │ │ ├─234642 "sshd-session: zuul@notty"
               │ │ ├─234643 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
               │ │ ├─234667 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
               │ │ ├─240108 timeout 15s turbostat --debug sleep 10
               │ │ ├─240545 timeout 300s systemctl status --all
               │ │ ├─240546 systemctl status --all
               │ │ ├─240589 timeout 300s tuned-adm recommend
               │ │ ├─240590 /usr/bin/python3 -Es /usr/sbin/tuned-adm recommend
               │ │ ├─240593 timeout 300s ceph fs status --format json-pretty
               │ │ └─240594 /usr/bin/python3 -s /usr/bin/ceph fs status --format json-pretty
               │ └─user@1000.service
               │   ├─app.slice
               │   │ └─dbus-broker.service
               │   │   ├─29571 /usr/bin/dbus-broker-launch --scope user
               │   │   └─29572 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               │   ├─init.scope
               │   │ ├─4307 /usr/lib/systemd/systemd --user
               │   │ └─4309 "(sd-pam)"
               │   └─user.slice
               │     └─podman-pause-444cdfba.scope
               │       └─29556 catatonit -P
               └─user-42477.slice
                 ├─session-21.scope
                 │ ├─72547 "sshd-session: ceph-admin [priv]"
                 │ └─72571 "sshd-session: ceph-admin"
                 ├─session-23.scope
                 │ ├─72565 "sshd-session: ceph-admin [priv]"
                 │ └─72572 "sshd-session: ceph-admin@notty"
                 ├─session-24.scope
                 │ ├─72623 "sshd-session: ceph-admin [priv]"
                 │ └─72626 "sshd-session: ceph-admin@notty"
                 ├─session-25.scope
                 │ ├─72677 "sshd-session: ceph-admin [priv]"
                 │ └─72680 "sshd-session: ceph-admin@notty"
                 ├─session-26.scope
                 │ ├─72731 "sshd-session: ceph-admin [priv]"
                 │ └─72734 "sshd-session: ceph-admin@notty"
                 ├─session-27.scope
                 │ ├─72785 "sshd-session: ceph-admin [priv]"
                 │ └─72788 "sshd-session: ceph-admin@notty"
                 ├─session-28.scope
                 │ ├─72839 "sshd-session: ceph-admin [priv]"
                 │ └─72842 "sshd-session: ceph-admin@notty"
                 ├─session-29.scope
                 │ ├─72893 "sshd-session: ceph-admin [priv]"
                 │ └─72896 "sshd-session: ceph-admin@notty"
                 ├─session-30.scope
                 │ ├─72947 "sshd-session: ceph-admin [priv]"
                 │ └─72950 "sshd-session: ceph-admin@notty"
                 ├─session-31.scope
                 │ ├─73001 "sshd-session: ceph-admin [priv]"
                 │ └─73004 "sshd-session: ceph-admin@notty"
                 ├─session-32.scope
                 │ ├─73028 "sshd-session: ceph-admin [priv]"
                 │ └─73031 "sshd-session: ceph-admin@notty"
                 ├─session-33.scope
                 │ ├─73082 "sshd-session: ceph-admin [priv]"
                 │ └─73085 "sshd-session: ceph-admin@notty"
                 └─user@42477.service
                   └─init.scope
                     ├─72551 /usr/lib/systemd/systemd --user
                     └─72553 "(sd-pam)"

Jan 27 09:00:30 compute-1 systemd[1]: libpod-conmon-60b97cc0f104261d7b36c795f8b389097a5888722aeb2c4e6cfaeaadad972630.scope: Deactivated successfully.
Jan 27 09:00:30 compute-1 systemd[1]: Started libpod-conmon-fbdd64e299b5ee7c3cca531c13e389352e17631c395cb3dca19318db592fdedb.scope.
Jan 27 09:00:30 compute-1 systemd[1]: Started libcrun container.
Jan 27 09:00:31 compute-1 systemd[1]: libpod-fbdd64e299b5ee7c3cca531c13e389352e17631c395cb3dca19318db592fdedb.scope: Deactivated successfully.
Jan 27 09:00:31 compute-1 systemd[1]: libpod-fbdd64e299b5ee7c3cca531c13e389352e17631c395cb3dca19318db592fdedb.scope: Consumed 1.259s CPU time.
Jan 27 09:00:31 compute-1 systemd[1]: var-lib-containers-storage-overlay-db303d38e7db45d217c7cd02e5f50b21b9105fd4d1266d8bbd9ff38d65b4522b-merged.mount: Deactivated successfully.
Jan 27 09:00:31 compute-1 systemd[1]: libpod-conmon-fbdd64e299b5ee7c3cca531c13e389352e17631c395cb3dca19318db592fdedb.scope: Deactivated successfully.
Jan 27 09:22:16 compute-1 systemd[1]: Started Session 52 of User zuul.
Jan 27 09:22:37 compute-1 systemd[1]: Starting Hostname Service...
Jan 27 09:22:37 compute-1 systemd[1]: Started Hostname Service.

● machine.slice - Virtual Machine and Container Slice
     Loaded: loaded (/usr/lib/systemd/system/machine.slice; static)
     Active: active since Tue 2026-01-27 08:29:07 UTC; 53min ago
      Until: Tue 2026-01-27 08:29:07 UTC; 53min ago
       Docs: man:systemd.special(7)
         IO: 65.1M read, 4.3M written
      Tasks: 34
     Memory: 405.9M (peak: 449.3M)
        CPU: 1min 21.139s
     CGroup: /machine.slice
             ├─libpod-a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a.scope
             │ └─container
             │   ├─221468 dumb-init --single-child -- kolla_start
             │   └─221470 /usr/bin/python3 /usr/bin/nova-compute
             ├─libpod-d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1.scope
             │ └─container
             │   ├─139975 dumb-init --single-child -- kolla_start
             │   ├─139978 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │   ├─140319 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │   └─140418 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpm4k7fsyh/privsep.sock
             └─libpod-f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d.scope
               └─container
                 ├─130303 dumb-init --single-child -- kolla_start
                 └─130306 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt

Jan 27 09:00:31 compute-1 compassionate_golick[226212]:             "sectorsize": "2048",
Jan 27 09:00:31 compute-1 compassionate_golick[226212]:             "size": 493568.0,
Jan 27 09:00:31 compute-1 compassionate_golick[226212]:             "support_discard": "2048",
Jan 27 09:00:31 compute-1 compassionate_golick[226212]:             "type": "disk",
Jan 27 09:00:31 compute-1 compassionate_golick[226212]:             "vendor": "QEMU"
Jan 27 09:00:31 compute-1 compassionate_golick[226212]:         }
Jan 27 09:00:31 compute-1 compassionate_golick[226212]:     }
Jan 27 09:00:31 compute-1 compassionate_golick[226212]: ]
Jan 27 09:00:31 compute-1 podman[227471]: 2026-01-27 09:00:31.808086826 +0000 UTC m=+0.025770319 container died fbdd64e299b5ee7c3cca531c13e389352e17631c395cb3dca19318db592fdedb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_golick, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.39.3, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Jan 27 09:00:31 compute-1 podman[227471]: 2026-01-27 09:00:31.863540397 +0000 UTC m=+0.081223880 container remove fbdd64e299b5ee7c3cca531c13e389352e17631c395cb3dca19318db592fdedb (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=compassionate_golick, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, ceph=True, io.buildah.version=1.39.3, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)

● system-ceph\x2d281e9bde\x2d2795\x2d59f4\x2d98ac\x2d90cf5b49a2de.slice - Slice /system/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de
     Loaded: loaded
     Active: active since Tue 2026-01-27 08:29:14 UTC; 53min ago
      Until: Tue 2026-01-27 08:29:14 UTC; 53min ago
         IO: 102.5M read, 2.3G written
      Tasks: 736
     Memory: 1.2G (peak: 1.2G)
        CPU: 1min 53.586s
     CGroup: /system.slice/system-ceph\x2d281e9bde\x2d2795\x2d59f4\x2d98ac\x2d90cf5b49a2de.slice
             ├─ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@crash.compute-1.service
             │ ├─libpod-payload-8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c
             │ │ ├─76982 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-1
             │ │ └─76984 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-1
             │ └─runtime
             │   └─76980 /usr/bin/conmon --api-version 1 -c 8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c -u 8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c/userdata -p /run/containers/storage/overlay-containers/8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-crash-compute-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@crash.compute-1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c
             ├─ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mds.cephfs.compute-1.taxacd.service
             │ ├─libpod-payload-3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211
             │ │ ├─84857 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-1.taxacd -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─84859 /usr/bin/ceph-mds -n mds.cephfs.compute-1.taxacd -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─84855 /usr/bin/conmon --api-version 1 -c 3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211 -u 3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211/userdata -p /run/containers/storage/overlay-containers/3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-mds-cephfs-compute-1-taxacd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mds.cephfs.compute-1.taxacd.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211
             ├─ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mgr.compute-1.jqbgxp.service
             │ ├─libpod-payload-2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d
             │ │ ├─81955 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-1.jqbgxp -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─81957 /usr/bin/ceph-mgr -n mgr.compute-1.jqbgxp -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─81953 /usr/bin/conmon --api-version 1 -c 2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d -u 2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d/userdata -p /run/containers/storage/overlay-containers/2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-mgr-compute-1-jqbgxp --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mgr.compute-1.jqbgxp.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d
             ├─ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mon.compute-1.service
             │ ├─libpod-payload-251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd
             │ │ ├─81594 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─81596 /usr/bin/ceph-mon -n mon.compute-1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─runtime
             │   └─81592 /usr/bin/conmon --api-version 1 -c 251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd -u 251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd/userdata -p /run/containers/storage/overlay-containers/251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-mon-compute-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mon.compute-1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd
             ├─ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@osd.1.service
             │ ├─libpod-payload-6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef
             │ │ ├─78931 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─78933 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─78929 /usr/bin/conmon --api-version 1 -c 6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef -u 6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef/userdata -p /run/containers/storage/overlay-containers/6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef
             └─ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@rgw.rgw.compute-1.nigpsg.service
               ├─libpod-payload-39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249
               │ ├─83598 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-1.nigpsg -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
               │ └─83600 /usr/bin/radosgw -n client.rgw.rgw.compute-1.nigpsg -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
               └─runtime
                 └─83596 /usr/bin/conmon --api-version 1 -c 39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249 -u 39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249/userdata -p /run/containers/storage/overlay-containers/39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-rgw-rgw-compute-1-nigpsg --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@rgw.rgw.compute-1.nigpsg.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249

Jan 27 09:22:58 compute-1 ceph-mon[81596]: pgmap v1807: 305 pgs: 305 active+clean; 41 MiB data, 244 MiB used, 21 GiB / 21 GiB avail
Jan 27 09:22:58 compute-1 ceph-mon[81596]: from='client.? 192.168.122.100:0/998336395' entity='client.admin' cmd=[{"prefix": "osd pool ls", "detail": "detail"}]: dispatch
Jan 27 09:22:58 compute-1 ceph-mon[81596]: from='client.28205 -' entity='client.admin' cmd=[{"prefix": "osd pool autoscale-status", "target": ["mon-mgr", ""]}]: dispatch
Jan 27 09:22:58 compute-1 ceph-mon[81596]: from='client.? 192.168.122.101:0/1196541295' entity='client.admin' cmd=[{"prefix": "osd pool ls", "detail": "detail"}]: dispatch
Jan 27 09:22:58 compute-1 ceph-mon[81596]: from='client.? 192.168.122.101:0/1442867532' entity='client.admin' cmd=[{"prefix": "osd stat"}]: dispatch
Jan 27 09:22:58 compute-1 ceph-mon[81596]: mon.compute-1@2(peon) e3 handle_command mon_command({"prefix": "fs dump", "format": "json-pretty"} v 0) v1
Jan 27 09:22:58 compute-1 ceph-mon[81596]: log_channel(audit) log [DBG] : from='client.? 192.168.122.101:0/202423810' entity='client.admin' cmd=[{"prefix": "fs dump", "format": "json-pretty"}]: dispatch
Jan 27 09:22:58 compute-1 ceph-mon[81596]: mon.compute-1@2(peon) e3 handle_command mon_command({"prefix": "fs ls", "format": "json-pretty"} v 0) v1
Jan 27 09:22:58 compute-1 ceph-mon[81596]: log_channel(audit) log [DBG] : from='client.? 192.168.122.101:0/1220449197' entity='client.admin' cmd=[{"prefix": "fs ls", "format": "json-pretty"}]: dispatch
Jan 27 09:22:59 compute-1 ceph-mon[81596]: mon.compute-1@2(peon).osd e177 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 343932928 full_alloc: 348127232 kv_alloc: 318767104

● system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice - Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged
     Loaded: loaded
     Active: active since Tue 2026-01-27 08:45:16 UTC; 37min ago
      Until: Tue 2026-01-27 08:45:16 UTC; 37min ago
         IO: 4.0K read, 0B written
      Tasks: 0
     Memory: 4.0K (peak: 58.7M)
        CPU: 1.084s
     CGroup: /system.slice/system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice

Jan 27 08:45:16 compute-1 systemd[1]: Created slice Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged.

● system-getty.slice - Slice /system/getty
     Loaded: loaded
     Active: active since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 296.0K (peak: 520.0K)
        CPU: 6ms
     CGroup: /system.slice/system-getty.slice
             └─getty@tty1.service
               └─1012 /sbin/agetty -o "-p -- \\u" --noclear - linux

● system-modprobe.slice - Slice /system/modprobe
     Loaded: loaded
     Active: active since Tue 2026-01-27 07:47:03 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:03 UTC; 1h 35min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 144.0K (peak: 12.0M)
        CPU: 136ms
     CGroup: /system.slice/system-modprobe.slice

Jan 27 07:47:03 localhost systemd[1]: Created slice Slice /system/modprobe.

● system-serial\x2dgetty.slice - Slice /system/serial-getty
     Loaded: loaded
     Active: active since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 260.0K (peak: 740.0K)
        CPU: 10ms
     CGroup: /system.slice/system-serial\x2dgetty.slice
             └─serial-getty@ttyS0.service
               └─1013 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

● system-sshd\x2dkeygen.slice - Slice /system/sshd-keygen
     Loaded: loaded
     Active: active since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 0B (peak: 0B)
        CPU: 0
     CGroup: /system.slice/system-sshd\x2dkeygen.slice

● system.slice - System Slice
     Loaded: loaded
     Active: active since Tue 2026-01-27 07:47:02 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:02 UTC; 1h 35min ago
       Docs: man:systemd.special(7)
         IO: 262.1M read, 2.4G written
      Tasks: 849
     Memory: 1.9G (peak: 1.9G)
        CPU: 5min 45.041s
     CGroup: /system.slice
             ├─NetworkManager.service
             │ └─48943 /usr/sbin/NetworkManager --no-daemon
             ├─auditd.service
             │ ├─703 /sbin/auditd
             │ └─705 /usr/sbin/sedispatch
             ├─chronyd.service
             │ └─58510 /usr/sbin/chronyd -F 2
             ├─crond.service
             │ └─1011 /usr/sbin/crond -n
             ├─dbus-broker.service
             │ ├─744 /usr/bin/dbus-broker-launch --scope system --audit
             │ └─773 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             ├─edpm_nova_compute.service
             │ └─221466 /usr/bin/conmon --api-version 1 -c a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a -u a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a/userdata -p /run/containers/storage/overlay-containers/a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg a3bb16dbb05ddc47f45ec9373ca7302ec69431e14964c6960ac6f45103f0676a
             ├─edpm_ovn_controller.service
             │ └─130301 /usr/bin/conmon --api-version 1 -c f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d -u f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d/userdata -p /run/containers/storage/overlay-containers/f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d
             ├─edpm_ovn_metadata_agent.service
             │ └─139973 /usr/bin/conmon --api-version 1 -c d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1 -u d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1/userdata -p /run/containers/storage/overlay-containers/d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1
             ├─gssproxy.service
             │ └─872 /usr/sbin/gssproxy -D
             ├─irqbalance.service
             │ └─788 /usr/sbin/irqbalance
             ├─iscsid.service
             │ └─206933 /usr/sbin/iscsid -f
             ├─multipathd.service
             │ └─207091 /sbin/multipathd -d -s
             ├─ovs-vswitchd.service
             │ └─47250 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             ├─ovsdb-server.service
             │ └─47167 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             ├─polkit.service
             │ └─43431 /usr/lib/polkit-1/polkitd --no-debug
             ├─rpcbind.service
             │ └─701 /usr/bin/rpcbind -w -f
             ├─rsyslog.service
             │ └─1007 /usr/sbin/rsyslogd -n
             ├─sshd.service
             │ └─165764 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             ├─system-ceph\x2d281e9bde\x2d2795\x2d59f4\x2d98ac\x2d90cf5b49a2de.slice
             │ ├─ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@crash.compute-1.service
             │ │ ├─libpod-payload-8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c
             │ │ │ ├─76982 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-1
             │ │ │ └─76984 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-1
             │ │ └─runtime
             │ │   └─76980 /usr/bin/conmon --api-version 1 -c 8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c -u 8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c/userdata -p /run/containers/storage/overlay-containers/8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-crash-compute-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@crash.compute-1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8039fbf5b15063d013ad0b7da5995720757f418eb92b3d7e17b4ed9709486f7c
             │ ├─ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mds.cephfs.compute-1.taxacd.service
             │ │ ├─libpod-payload-3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211
             │ │ │ ├─84857 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-1.taxacd -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─84859 /usr/bin/ceph-mds -n mds.cephfs.compute-1.taxacd -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─84855 /usr/bin/conmon --api-version 1 -c 3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211 -u 3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211/userdata -p /run/containers/storage/overlay-containers/3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-mds-cephfs-compute-1-taxacd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mds.cephfs.compute-1.taxacd.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3a68f1d14d85f6e31a6938b8bcbd914609af59268da0b55b5b26224c4e100211
             │ ├─ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mgr.compute-1.jqbgxp.service
             │ │ ├─libpod-payload-2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d
             │ │ │ ├─81955 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-1.jqbgxp -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─81957 /usr/bin/ceph-mgr -n mgr.compute-1.jqbgxp -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─81953 /usr/bin/conmon --api-version 1 -c 2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d -u 2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d/userdata -p /run/containers/storage/overlay-containers/2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-mgr-compute-1-jqbgxp --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mgr.compute-1.jqbgxp.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2e8b86fae08488082046be8e33ef9fc03ecfc0973404b56eb059b1b63639578d
             │ ├─ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mon.compute-1.service
             │ │ ├─libpod-payload-251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd
             │ │ │ ├─81594 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─81596 /usr/bin/ceph-mon -n mon.compute-1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─runtime
             │ │   └─81592 /usr/bin/conmon --api-version 1 -c 251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd -u 251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd/userdata -p /run/containers/storage/overlay-containers/251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-mon-compute-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@mon.compute-1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 251b1a4718b2fd4c67af53f2ed308c3c23cf8f9f405ab5e74c4b1afc3d7c00dd
             │ ├─ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@osd.1.service
             │ │ ├─libpod-payload-6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef
             │ │ │ ├─78931 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─78933 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─78929 /usr/bin/conmon --api-version 1 -c 6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef -u 6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef/userdata -p /run/containers/storage/overlay-containers/6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 6c1249c6f24e9c6538782f26c6c9ae2b2774052438800f108c52dd0b0dd395ef
             │ └─ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@rgw.rgw.compute-1.nigpsg.service
             │   ├─libpod-payload-39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249
             │   │ ├─83598 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-1.nigpsg -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │   │ └─83600 /usr/bin/radosgw -n client.rgw.rgw.compute-1.nigpsg -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │   └─runtime
             │     └─83596 /usr/bin/conmon --api-version 1 -c 39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249 -u 39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249/userdata -p /run/containers/storage/overlay-containers/39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249/userdata/pidfile -n ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de-rgw-rgw-compute-1-nigpsg --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249/userdata/oci-log --conmon-pidfile /run/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de@rgw.rgw.compute-1.nigpsg.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 39cc3372cbea0c2530037f484bf587c49489c87fa8290ae449ca851706b4c249
             ├─system-getty.slice
             │ └─getty@tty1.service
             │   └─1012 /sbin/agetty -o "-p -- \\u" --noclear - linux
             ├─system-serial\x2dgetty.slice
             │ └─serial-getty@ttyS0.service
             │   └─1013 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             ├─systemd-hostnamed.service
             │ └─237147 /usr/lib/systemd/systemd-hostnamed
             ├─systemd-journald.service
             │ └─680 /usr/lib/systemd/systemd-journald
             ├─systemd-logind.service
             │ └─806 /usr/lib/systemd/systemd-logind
             ├─systemd-machined.service
             │ └─190815 /usr/lib/systemd/systemd-machined
             ├─systemd-udevd.service
             │ └─udev
             │   └─731 /usr/lib/systemd/systemd-udevd
             ├─tuned.service
             │ └─92931 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             ├─virtlogd.service
             │ └─190186 /usr/sbin/virtlogd
             ├─virtnodedevd.service
             │ └─222034 /usr/sbin/virtnodedevd --timeout 120
             └─virtqemud.service
               └─221900 /usr/sbin/virtqemud --timeout 120

Jan 27 09:22:29 compute-1 nova_compute[221466]: 2026-01-27 09:22:29.907 221470 DEBUG oslo_concurrency.lockutils [None req-b051c06b-b04d-4ebb-a974-55605b45294f - - - - - -] Lock "compute_resources" "released" by "nova.compute.resource_tracker.ResourceTracker._update_available_resource" :: held 0.759s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Jan 27 09:22:30 compute-1 nova_compute[221466]: 2026-01-27 09:22:30.902 221470 DEBUG oslo_service.periodic_task [None req-b051c06b-b04d-4ebb-a974-55605b45294f - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Jan 27 09:22:32 compute-1 nova_compute[221466]: 2026-01-27 09:22:32.386 221470 DEBUG oslo_service.periodic_task [None req-b051c06b-b04d-4ebb-a974-55605b45294f - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Jan 27 09:22:33 compute-1 rsyslogd[1007]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 27 09:22:33 compute-1 podman[236615]: 2026-01-27 09:22:33.93480754 +0000 UTC m=+0.070158927 container health_status d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '249f42cc0a5de6940e06c976a81a3e64ae1c330f940cff0a51730e3f74af51fa-443ed18f8996048d3715b9ec80abd55220542871602df33775d8611b19ccacea-443ed18f8996048d3715b9ec80abd55220542871602df33775d8611b19ccacea-443ed18f8996048d3715b9ec80abd55220542871602df33775d8611b19ccacea-0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/openstack/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', 
'/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251202, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, io.buildah.version=1.41.3, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, managed_by=edpm_ansible)
Jan 27 09:22:39 compute-1 nova_compute[221466]: 2026-01-27 09:22:39.387 221470 DEBUG oslo_service.periodic_task [None req-b051c06b-b04d-4ebb-a974-55605b45294f - - - - - -] Running periodic task ComputeManager._poll_rescued_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Jan 27 09:22:54 compute-1 ovn_metadata_agent[139973]: 2026-01-27 09:22:54.648 139978 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Jan 27 09:22:54 compute-1 ovn_metadata_agent[139973]: 2026-01-27 09:22:54.650 139978 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.002s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Jan 27 09:22:54 compute-1 ovn_metadata_agent[139973]: 2026-01-27 09:22:54.650 139978 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Jan 27 09:22:55 compute-1 podman[240341]: 2026-01-27 09:22:55.952412269 +0000 UTC m=+0.090881580 container health_status f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '249f42cc0a5de6940e06c976a81a3e64ae1c330f940cff0a51730e3f74af51fa-443ed18f8996048d3715b9ec80abd55220542871602df33775d8611b19ccacea-443ed18f8996048d3715b9ec80abd55220542871602df33775d8611b19ccacea-443ed18f8996048d3715b9ec80abd55220542871602df33775d8611b19ccacea'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, container_name=ovn_controller, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, managed_by=edpm_ansible, org.label-schema.build-date=20251202, org.label-schema.schema-version=1.0, 
tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, tcib_managed=true)

● user-1000.slice - User Slice of UID 1000
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Tue 2026-01-27 07:47:29 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:29 UTC; 1h 35min ago
       Docs: man:user@.service(5)
         IO: 457.9M read, 6.2G written
      Tasks: 37 (limit: 20031)
     Memory: 2.9G (peak: 3.5G)
        CPU: 19min 9.113s
     CGroup: /user.slice/user-1000.slice
             ├─session-1.scope
             │ └─4519 /usr/bin/python3
             ├─session-52.scope
             │ ├─234639 "sshd-session: zuul [priv]"
             │ ├─234642 "sshd-session: zuul@notty"
             │ ├─234643 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ ├─234667 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ ├─240108 timeout 15s turbostat --debug sleep 10
             │ ├─240545 timeout 300s systemctl status --all
             │ ├─240546 systemctl status --all
             │ ├─240589 timeout 300s tuned-adm recommend
             │ ├─240590 /usr/bin/python3 -Es /usr/sbin/tuned-adm recommend
             │ ├─240593 timeout 300s ceph fs status --format json-pretty
             │ └─240594 /usr/bin/python3 -s /usr/bin/ceph fs status --format json-pretty
             └─user@1000.service
               ├─app.slice
               │ └─dbus-broker.service
               │   ├─29571 /usr/bin/dbus-broker-launch --scope user
               │   └─29572 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               ├─init.scope
               │ ├─4307 /usr/lib/systemd/systemd --user
               │ └─4309 "(sd-pam)"
               └─user.slice
                 └─podman-pause-444cdfba.scope
                   └─29556 catatonit -P

Jan 27 08:48:55 compute-1 podman[221679]: 2026-01-27 08:48:55.204003007 +0000 UTC m=+0.027473230 container died 10dd488de2250b62ef999f3cb73e7b430de77c0080bf6292e7baab36ca1069f6 (image=quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified, name=nova_compute_init, tcib_managed=true, container_name=nova_compute_init, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified', 'privileged': False, 'user': 'root', 'restart': 'never', 'command': 'bash -c $* -- eval python3 /sbin/nova_statedir_ownership.py | logger -t nova_compute_init', 'net': 'none', 'security_opt': ['label=disable'], 'detach': False, 'environment': {'NOVA_STATEDIR_OWNERSHIP_SKIP': '/var/lib/nova/compute_id', '__OS_DEBUG': False}, 'volumes': ['/dev/log:/dev/log', '/var/lib/nova:/var/lib/nova:shared', '/var/lib/_nova_secontext:/var/lib/_nova_secontext:shared,z', '/var/lib/openstack/config/nova/nova_statedir_ownership.py:/sbin/nova_statedir_ownership.py:z']}, config_id=edpm, managed_by=edpm_ansible, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251202, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb)
Jan 27 08:48:55 compute-1 sudo[221627]: pam_unix(sudo:session): session closed for user root
Jan 27 08:48:56 compute-1 sshd-session[197990]: Connection closed by 192.168.122.30 port 52560
Jan 27 08:48:56 compute-1 sshd-session[197987]: pam_unix(sshd:session): session closed for user zuul
Jan 27 09:22:16 compute-1 sudo[234643]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Jan 27 09:22:16 compute-1 sudo[234643]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Jan 27 09:22:24 compute-1 ovs-vsctl[235017]: ovs|00001|db_ctl_base|ERR|no key "dpdk-init" in Open_vSwitch record "." column other_config
Jan 27 09:22:48 compute-1 ovs-appctl[238966]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Jan 27 09:22:48 compute-1 ovs-appctl[238971]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Jan 27 09:22:48 compute-1 ovs-appctl[238974]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

● user-42477.slice - User Slice of UID 42477
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Tue 2026-01-27 08:28:46 UTC; 54min ago
      Until: Tue 2026-01-27 08:28:46 UTC; 54min ago
       Docs: man:user@.service(5)
         IO: 184.0K read, 1.4G written
      Tasks: 26 (limit: 20031)
     Memory: 1.2G (peak: 1.7G)
        CPU: 1min 56.143s
     CGroup: /user.slice/user-42477.slice
             ├─session-21.scope
             │ ├─72547 "sshd-session: ceph-admin [priv]"
             │ └─72571 "sshd-session: ceph-admin"
             ├─session-23.scope
             │ ├─72565 "sshd-session: ceph-admin [priv]"
             │ └─72572 "sshd-session: ceph-admin@notty"
             ├─session-24.scope
             │ ├─72623 "sshd-session: ceph-admin [priv]"
             │ └─72626 "sshd-session: ceph-admin@notty"
             ├─session-25.scope
             │ ├─72677 "sshd-session: ceph-admin [priv]"
             │ └─72680 "sshd-session: ceph-admin@notty"
             ├─session-26.scope
             │ ├─72731 "sshd-session: ceph-admin [priv]"
             │ └─72734 "sshd-session: ceph-admin@notty"
             ├─session-27.scope
             │ ├─72785 "sshd-session: ceph-admin [priv]"
             │ └─72788 "sshd-session: ceph-admin@notty"
             ├─session-28.scope
             │ ├─72839 "sshd-session: ceph-admin [priv]"
             │ └─72842 "sshd-session: ceph-admin@notty"
             ├─session-29.scope
             │ ├─72893 "sshd-session: ceph-admin [priv]"
             │ └─72896 "sshd-session: ceph-admin@notty"
             ├─session-30.scope
             │ ├─72947 "sshd-session: ceph-admin [priv]"
             │ └─72950 "sshd-session: ceph-admin@notty"
             ├─session-31.scope
             │ ├─73001 "sshd-session: ceph-admin [priv]"
             │ └─73004 "sshd-session: ceph-admin@notty"
             ├─session-32.scope
             │ ├─73028 "sshd-session: ceph-admin [priv]"
             │ └─73031 "sshd-session: ceph-admin@notty"
             ├─session-33.scope
             │ ├─73082 "sshd-session: ceph-admin [priv]"
             │ └─73085 "sshd-session: ceph-admin@notty"
             └─user@42477.service
               └─init.scope
                 ├─72551 /usr/lib/systemd/systemd --user
                 └─72553 "(sd-pam)"

Jan 27 09:22:08 compute-1 sudo[234558]: pam_unix(sudo:session): session closed for user root
Jan 27 09:22:08 compute-1 sudo[234583]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/281e9bde-2795-59f4-98ac-90cf5b49a2de/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 gather-facts
Jan 27 09:22:08 compute-1 sudo[234583]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 27 09:22:09 compute-1 sudo[234583]: pam_unix(sudo:session): session closed for user root
Jan 27 09:22:17 compute-1 sudo[234710]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 27 09:22:17 compute-1 sudo[234710]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 27 09:22:17 compute-1 sudo[234710]: pam_unix(sudo:session): session closed for user root
Jan 27 09:22:17 compute-1 sudo[234745]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Jan 27 09:22:17 compute-1 sudo[234745]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 27 09:22:17 compute-1 sudo[234745]: pam_unix(sudo:session): session closed for user root

● user.slice - User and Session Slice
     Loaded: loaded (/usr/lib/systemd/system/user.slice; static)
     Active: active since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd.special(7)
         IO: 458.1M read, 7.7G written
      Tasks: 63
     Memory: 4.1G (peak: 4.8G)
        CPU: 21min 5.502s
     CGroup: /user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4519 /usr/bin/python3
             │ ├─session-52.scope
             │ │ ├─234639 "sshd-session: zuul [priv]"
             │ │ ├─234642 "sshd-session: zuul@notty"
             │ │ ├─234643 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─234667 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─240108 timeout 15s turbostat --debug sleep 10
             │ │ ├─240545 timeout 300s systemctl status --all
             │ │ ├─240546 systemctl status --all
             │ │ ├─240589 timeout 300s tuned-adm recommend
             │ │ ├─240590 /usr/bin/python3 -Es /usr/sbin/tuned-adm recommend
             │ │ ├─240593 timeout 300s ceph fs status --format json-pretty
             │ │ └─240594 /usr/bin/python3 -s /usr/bin/ceph fs status --format json-pretty
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─29571 /usr/bin/dbus-broker-launch --scope user
             │   │   └─29572 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4307 /usr/lib/systemd/systemd --user
             │   │ └─4309 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-444cdfba.scope
             │       └─29556 catatonit -P
             └─user-42477.slice
               ├─session-21.scope
               │ ├─72547 "sshd-session: ceph-admin [priv]"
               │ └─72571 "sshd-session: ceph-admin"
               ├─session-23.scope
               │ ├─72565 "sshd-session: ceph-admin [priv]"
               │ └─72572 "sshd-session: ceph-admin@notty"
               ├─session-24.scope
               │ ├─72623 "sshd-session: ceph-admin [priv]"
               │ └─72626 "sshd-session: ceph-admin@notty"
               ├─session-25.scope
               │ ├─72677 "sshd-session: ceph-admin [priv]"
               │ └─72680 "sshd-session: ceph-admin@notty"
               ├─session-26.scope
               │ ├─72731 "sshd-session: ceph-admin [priv]"
               │ └─72734 "sshd-session: ceph-admin@notty"
               ├─session-27.scope
               │ ├─72785 "sshd-session: ceph-admin [priv]"
               │ └─72788 "sshd-session: ceph-admin@notty"
               ├─session-28.scope
               │ ├─72839 "sshd-session: ceph-admin [priv]"
               │ └─72842 "sshd-session: ceph-admin@notty"
               ├─session-29.scope
               │ ├─72893 "sshd-session: ceph-admin [priv]"
               │ └─72896 "sshd-session: ceph-admin@notty"
               ├─session-30.scope
               │ ├─72947 "sshd-session: ceph-admin [priv]"
               │ └─72950 "sshd-session: ceph-admin@notty"
               ├─session-31.scope
               │ ├─73001 "sshd-session: ceph-admin [priv]"
               │ └─73004 "sshd-session: ceph-admin@notty"
               ├─session-32.scope
               │ ├─73028 "sshd-session: ceph-admin [priv]"
               │ └─73031 "sshd-session: ceph-admin@notty"
               ├─session-33.scope
               │ ├─73082 "sshd-session: ceph-admin [priv]"
               │ └─73085 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─72551 /usr/lib/systemd/systemd --user
                   └─72553 "(sd-pam)"

● dbus.socket - D-Bus System Message Bus Socket
     Loaded: loaded (/usr/lib/systemd/system/dbus.socket; enabled; preset: enabled)
     Active: active (running) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
   Triggers: ● dbus-broker.service
     Listen: /run/dbus/system_bus_socket (Stream)
     CGroup: /system.slice/dbus.socket

Jan 27 07:47:07 localhost systemd[1]: Listening on D-Bus System Message Bus Socket.

● dm-event.socket - Device-mapper event daemon FIFOs
     Loaded: loaded (/usr/lib/systemd/system/dm-event.socket; enabled; preset: enabled)
     Active: active (listening) since Tue 2026-01-27 08:19:30 UTC; 1h 3min ago
      Until: Tue 2026-01-27 08:19:30 UTC; 1h 3min ago
   Triggers: ● dm-event.service
       Docs: man:dmeventd(8)
     Listen: /run/dmeventd-server (FIFO)
             /run/dmeventd-client (FIFO)
     CGroup: /system.slice/dm-event.socket

Jan 27 08:19:30 compute-1 systemd[1]: Listening on Device-mapper event daemon FIFOs.

● iscsid.socket - Open-iSCSI iscsid Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsid.socket; enabled; preset: enabled)
     Active: active (running) since Tue 2026-01-27 08:46:23 UTC; 36min ago
      Until: Tue 2026-01-27 08:46:23 UTC; 36min ago
   Triggers: ● iscsid.service
       Docs: man:iscsid(8)
             man:iscsiadm(8)
     Listen: @ISCSIADM_ABSTRACT_NAMESPACE (Stream)
     CGroup: /system.slice/iscsid.socket

Jan 27 08:46:23 compute-1 systemd[1]: Listening on Open-iSCSI iscsid Socket.

○ iscsiuio.socket - Open-iSCSI iscsiuio Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● iscsiuio.service
       Docs: man:iscsiuio(8)
     Listen: @ISCSID_UIP_ABSTRACT_NAMESPACE (Stream)

○ libvirtd-admin.socket - libvirt legacy monolithic daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-admin.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)

○ libvirtd-ro.socket - libvirt legacy monolithic daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-ro.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)

○ libvirtd-tls.socket
     Loaded: masked (Reason: Unit libvirtd-tls.socket is masked.)
     Active: inactive (dead)

○ libvirtd.socket - libvirt legacy monolithic daemon socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock (Stream)

● lvm2-lvmpolld.socket - LVM2 poll daemon socket
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.socket; enabled; preset: enabled)
     Active: active (listening) since Tue 2026-01-27 08:19:31 UTC; 1h 3min ago
      Until: Tue 2026-01-27 08:19:31 UTC; 1h 3min ago
   Triggers: ● lvm2-lvmpolld.service
       Docs: man:lvmpolld(8)
     Listen: /run/lvm/lvmpolld.socket (Stream)
     CGroup: /system.slice/lvm2-lvmpolld.socket

Jan 27 08:19:31 compute-1 systemd[1]: Listening on LVM2 poll daemon socket.

● multipathd.socket - multipathd control socket
     Loaded: loaded (/usr/lib/systemd/system/multipathd.socket; enabled; preset: disabled)
     Active: active (running) since Tue 2026-01-27 08:46:51 UTC; 36min ago
      Until: Tue 2026-01-27 08:46:51 UTC; 36min ago
   Triggers: ● multipathd.service
     Listen: @/org/kernel/linux/storage/multipathd (Stream)
             /run/multipathd.socket (Stream)
     CGroup: /system.slice/multipathd.socket

Jan 27 08:46:51 compute-1 systemd[1]: Listening on multipathd control socket.

● rpcbind.socket - RPCbind Server Activation Socket
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.socket; enabled; preset: enabled)
     Active: active (running) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
   Triggers: ● rpcbind.service
     Listen: /run/rpcbind.sock (Stream)
             0.0.0.0:111 (Stream)
             0.0.0.0:111 (Datagram)
             [::]:111 (Stream)
             [::]:111 (Datagram)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 16.0K (peak: 288.0K)
        CPU: 5ms
     CGroup: /system.slice/rpcbind.socket

● sssd-kcm.socket - SSSD Kerberos Cache Manager responder socket
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.socket; enabled; preset: enabled)
     Active: active (listening) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
   Triggers: ● sssd-kcm.service
       Docs: man:sssd-kcm(8)
     Listen: /run/.heim_org.h5l.kcm-socket (Stream)
     CGroup: /system.slice/sssd-kcm.socket

Jan 27 07:47:07 localhost systemd[1]: Listening on SSSD Kerberos Cache Manager responder socket.

○ syslog.socket - Syslog Socket
     Loaded: loaded (/usr/lib/systemd/system/syslog.socket; static)
     Active: inactive (dead)
   Triggers: ● syslog.service
       Docs: man:systemd.special(7)
             https://www.freedesktop.org/wiki/Software/systemd/syslog
     Listen: /run/systemd/journal/syslog (Datagram)

● systemd-coredump.socket - Process Core Dump Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-coredump.socket; static)
     Active: active (listening) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd-coredump(8)
     Listen: /run/systemd/coredump (SequentialPacket)
   Accepted: 0; Connected: 0;
     CGroup: /system.slice/systemd-coredump.socket

● systemd-initctl.socket - initctl Compatibility Named Pipe
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.socket; static)
     Active: active (listening) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
   Triggers: ● systemd-initctl.service
       Docs: man:systemd-initctl.socket(8)
     Listen: /run/initctl (FIFO)
     CGroup: /system.slice/systemd-initctl.socket

● systemd-journald-dev-log.socket - Journal Socket (/dev/log)
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald-dev-log.socket; static)
     Active: active (running) since Tue 2026-01-27 07:47:03 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:03 UTC; 1h 35min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/dev-log (Datagram)
     CGroup: /system.slice/systemd-journald-dev-log.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-journald.socket - Journal Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.socket; static)
     Active: active (running) since Tue 2026-01-27 07:47:03 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:03 UTC; 1h 35min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/socket (Datagram)
             /run/systemd/journal/stdout (Stream)
     CGroup: /system.slice/systemd-journald.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-rfkill.socket - Load/Save RF Kill Switch Status /dev/rfkill Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.socket; static)
     Active: active (listening) since Tue 2026-01-27 07:47:09 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:09 UTC; 1h 35min ago
   Triggers: ● systemd-rfkill.service
       Docs: man:systemd-rfkill.socket(8)
     Listen: /dev/rfkill (Special)
     CGroup: /system.slice/systemd-rfkill.socket

Jan 27 07:47:09 np0005597077.novalocal systemd[1]: Listening on Load/Save RF Kill Switch Status /dev/rfkill Watch.

● systemd-udevd-control.socket - udev Control Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-control.socket; static)
     Active: active (running) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-control.socket(8)
             man:udev(7)
     Listen: /run/udev/control (SequentialPacket)
     CGroup: /system.slice/systemd-udevd-control.socket

● systemd-udevd-kernel.socket - udev Kernel Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-kernel.socket; static)
     Active: active (running) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-kernel.socket(8)
             man:udev(7)
     Listen: kobject-uevent 1 (Netlink)
     CGroup: /system.slice/systemd-udevd-kernel.socket

○ virtinterfaced-admin.socket - libvirt interface daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-admin-sock (Stream)

○ virtinterfaced-ro.socket - libvirt interface daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock-ro (Stream)

○ virtinterfaced.socket - libvirt interface daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock (Stream)

○ virtlockd-admin.socket - libvirt locking daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-admin-sock (Stream)

● virtlockd.socket - libvirt locking daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.socket; enabled; preset: enabled)
     Active: active (listening) since Tue 2026-01-27 08:45:16 UTC; 37min ago
      Until: Tue 2026-01-27 08:45:16 UTC; 37min ago
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-sock (Stream)
     CGroup: /system.slice/virtlockd.socket

Jan 27 08:45:16 compute-1 systemd[1]: Listening on libvirt locking daemon socket.

● virtlogd-admin.socket - libvirt logging daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd-admin.socket.d
             └─override.conf
     Active: active (running) since Tue 2026-01-27 08:45:13 UTC; 37min ago
      Until: Tue 2026-01-27 08:45:13 UTC; 37min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 568.0K)
        CPU: 2ms
     CGroup: /system.slice/virtlogd-admin.socket

Jan 27 08:45:13 compute-1 systemd[1]: Starting libvirt logging daemon admin socket...
Jan 27 08:45:13 compute-1 systemd[1]: Listening on libvirt logging daemon admin socket.

● virtlogd.socket - libvirt logging daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd.socket.d
             └─override.conf
     Active: active (running) since Tue 2026-01-27 08:45:13 UTC; 37min ago
      Until: Tue 2026-01-27 08:45:13 UTC; 37min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtlogd.socket

Jan 27 08:45:13 compute-1 systemd[1]: Starting libvirt logging daemon socket...
Jan 27 08:45:13 compute-1 systemd[1]: Listening on libvirt logging daemon socket.

○ virtnetworkd-admin.socket - libvirt network daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-admin-sock (Stream)

○ virtnetworkd-ro.socket - libvirt network daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock-ro (Stream)

○ virtnetworkd.socket - libvirt network daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock (Stream)

● virtnodedevd-admin.socket - libvirt nodedev daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-admin.socket.d
             └─override.conf
     Active: active (running) since Tue 2026-01-27 08:45:14 UTC; 37min ago
      Until: Tue 2026-01-27 08:45:14 UTC; 37min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 484.0K)
        CPU: 3ms
     CGroup: /system.slice/virtnodedevd-admin.socket

Jan 27 08:45:14 compute-1 systemd[1]: Starting libvirt nodedev daemon admin socket...
Jan 27 08:45:14 compute-1 systemd[1]: Listening on libvirt nodedev daemon admin socket.

● virtnodedevd-ro.socket - libvirt nodedev daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-ro.socket.d
             └─override.conf
     Active: active (running) since Tue 2026-01-27 08:45:14 UTC; 37min ago
      Until: Tue 2026-01-27 08:45:14 UTC; 37min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 5ms
     CGroup: /system.slice/virtnodedevd-ro.socket

Jan 27 08:45:14 compute-1 systemd[1]: Starting libvirt nodedev daemon read-only socket...
Jan 27 08:45:14 compute-1 systemd[1]: Listening on libvirt nodedev daemon read-only socket.

● virtnodedevd.socket - libvirt nodedev daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd.socket.d
             └─override.conf
     Active: active (running) since Tue 2026-01-27 08:45:14 UTC; 37min ago
      Until: Tue 2026-01-27 08:45:14 UTC; 37min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 4ms
     CGroup: /system.slice/virtnodedevd.socket

Jan 27 08:45:14 compute-1 systemd[1]: Starting libvirt nodedev daemon socket...
Jan 27 08:45:14 compute-1 systemd[1]: Listening on libvirt nodedev daemon socket.

○ virtnwfilterd-admin.socket - libvirt nwfilter daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-admin-sock (Stream)

○ virtnwfilterd-ro.socket - libvirt nwfilter daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock-ro (Stream)

○ virtnwfilterd.socket - libvirt nwfilter daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock (Stream)

● virtproxyd-admin.socket - libvirt proxy daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-admin.socket.d
             └─override.conf
     Active: active (listening) since Tue 2026-01-27 08:45:15 UTC; 37min ago
      Until: Tue 2026-01-27 08:45:15 UTC; 37min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtproxyd-admin.socket

Jan 27 08:45:15 compute-1 systemd[1]: Starting libvirt proxy daemon admin socket...
Jan 27 08:45:15 compute-1 systemd[1]: Listening on libvirt proxy daemon admin socket.

● virtproxyd-ro.socket - libvirt proxy daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-ro.socket.d
             └─override.conf
     Active: active (listening) since Tue 2026-01-27 08:45:15 UTC; 37min ago
      Until: Tue 2026-01-27 08:45:15 UTC; 37min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 256.0K)
        CPU: 3ms
     CGroup: /system.slice/virtproxyd-ro.socket

Jan 27 08:45:15 compute-1 systemd[1]: Starting libvirt proxy daemon read-only socket...
Jan 27 08:45:15 compute-1 systemd[1]: Listening on libvirt proxy daemon read-only socket.

● virtproxyd-tls.socket - libvirt proxy daemon TLS IP socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-tls.socket; enabled; preset: disabled)
     Active: active (listening) since Tue 2026-01-27 08:44:01 UTC; 38min ago
      Until: Tue 2026-01-27 08:44:01 UTC; 38min ago
   Triggers: ● virtproxyd.service
     Listen: [::]:16514 (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 8.0K (peak: 256.0K)
        CPU: 1ms
     CGroup: /system.slice/virtproxyd-tls.socket

Jan 27 08:44:01 compute-1 systemd[1]: Listening on libvirt proxy daemon TLS IP socket.

● virtproxyd.socket - libvirt proxy daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd.socket.d
             └─override.conf
     Active: active (listening) since Tue 2026-01-27 08:44:01 UTC; 38min ago
      Until: Tue 2026-01-27 08:44:01 UTC; 38min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock (Stream)
     CGroup: /system.slice/virtproxyd.socket

Jan 27 08:44:01 compute-1 systemd[1]: Listening on libvirt proxy daemon socket.

● virtqemud-admin.socket - libvirt QEMU daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-admin.socket.d
             └─override.conf
     Active: active (running) since Tue 2026-01-27 08:45:16 UTC; 37min ago
      Until: Tue 2026-01-27 08:45:16 UTC; 37min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 540.0K)
        CPU: 3ms
     CGroup: /system.slice/virtqemud-admin.socket

Jan 27 08:45:16 compute-1 systemd[1]: Starting libvirt QEMU daemon admin socket...
Jan 27 08:45:16 compute-1 systemd[1]: Listening on libvirt QEMU daemon admin socket.

● virtqemud-ro.socket - libvirt QEMU daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-ro.socket.d
             └─override.conf
     Active: active (running) since Tue 2026-01-27 08:45:16 UTC; 37min ago
      Until: Tue 2026-01-27 08:45:16 UTC; 37min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 508.0K)
        CPU: 2ms
     CGroup: /system.slice/virtqemud-ro.socket

Jan 27 08:45:16 compute-1 systemd[1]: Starting libvirt QEMU daemon read-only socket...
Jan 27 08:45:16 compute-1 systemd[1]: Listening on libvirt QEMU daemon read-only socket.

● virtqemud.socket - libvirt QEMU daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud.socket.d
             └─override.conf
     Active: active (running) since Tue 2026-01-27 08:45:16 UTC; 37min ago
      Until: Tue 2026-01-27 08:45:16 UTC; 37min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtqemud.socket

Jan 27 08:45:16 compute-1 systemd[1]: Starting libvirt QEMU daemon socket...
Jan 27 08:45:16 compute-1 systemd[1]: Listening on libvirt QEMU daemon socket.

● virtsecretd-admin.socket - libvirt secret daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-admin.socket.d
             └─override.conf
     Active: active (listening) since Tue 2026-01-27 08:45:18 UTC; 37min ago
      Until: Tue 2026-01-27 08:45:18 UTC; 37min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtsecretd-admin.socket

Jan 27 08:45:18 compute-1 systemd[1]: Starting libvirt secret daemon admin socket...
Jan 27 08:45:18 compute-1 systemd[1]: Listening on libvirt secret daemon admin socket.

● virtsecretd-ro.socket - libvirt secret daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-ro.socket.d
             └─override.conf
     Active: active (listening) since Tue 2026-01-27 08:45:18 UTC; 37min ago
      Until: Tue 2026-01-27 08:45:18 UTC; 37min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 4ms
     CGroup: /system.slice/virtsecretd-ro.socket

Jan 27 08:45:18 compute-1 systemd[1]: Starting libvirt secret daemon read-only socket...
Jan 27 08:45:18 compute-1 systemd[1]: Listening on libvirt secret daemon read-only socket.

● virtsecretd.socket - libvirt secret daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd.socket.d
             └─override.conf
     Active: active (listening) since Tue 2026-01-27 08:45:18 UTC; 37min ago
      Until: Tue 2026-01-27 08:45:18 UTC; 37min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 5ms
     CGroup: /system.slice/virtsecretd.socket

Jan 27 08:45:18 compute-1 systemd[1]: Starting libvirt secret daemon socket...
Jan 27 08:45:18 compute-1 systemd[1]: Listening on libvirt secret daemon socket.

○ virtstoraged-admin.socket - libvirt storage daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-admin-sock (Stream)

○ virtstoraged-ro.socket - libvirt storage daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock-ro (Stream)

○ virtstoraged.socket - libvirt storage daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock (Stream)

● swap.swap - /swap
     Loaded: loaded (/etc/fstab; generated)
     Active: active since Tue 2026-01-27 08:21:44 UTC; 1h 1min ago
      Until: Tue 2026-01-27 08:21:44 UTC; 1h 1min ago
       What: /swap
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● basic.target - Basic System
     Loaded: loaded (/usr/lib/systemd/system/basic.target; static)
     Active: active since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Jan 27 07:47:07 localhost systemd[1]: Reached target Basic System.

○ blockdev@dev-disk-by\x2duuid-22ac9141\x2d3960\x2d4912\x2db20e\x2d19fc8a328d40.target - Block Device Preparation for /dev/disk/by-uuid/22ac9141-3960-4912-b20e-19fc8a328d40
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ blockdev@dev-vda1.target - Block Device Preparation for /dev/vda1
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de.target - Ceph cluster 281e9bde-2795-59f4-98ac-90cf5b49a2de
     Loaded: loaded (/etc/systemd/system/ceph-281e9bde-2795-59f4-98ac-90cf5b49a2de.target; enabled; preset: disabled)
     Active: active since Tue 2026-01-27 08:29:14 UTC; 53min ago
      Until: Tue 2026-01-27 08:29:14 UTC; 53min ago

Jan 27 08:29:14 compute-1 systemd[1]: Reached target Ceph cluster 281e9bde-2795-59f4-98ac-90cf5b49a2de.

● ceph.target - All Ceph clusters and services
     Loaded: loaded (/etc/systemd/system/ceph.target; enabled; preset: disabled)
     Active: active since Tue 2026-01-27 08:29:13 UTC; 53min ago
      Until: Tue 2026-01-27 08:29:13 UTC; 53min ago

Jan 27 08:29:13 compute-1 systemd[1]: Reached target All Ceph clusters and services.

● cloud-config.target - Cloud-config availability
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.target; static)
     Active: active since Tue 2026-01-27 07:47:12 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:12 UTC; 1h 35min ago

Jan 27 07:47:12 np0005597077.novalocal systemd[1]: Reached target Cloud-config availability.

● cloud-init.target - Cloud-init target
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.target; enabled-runtime; preset: disabled)
     Active: active since Tue 2026-01-27 07:47:13 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:13 UTC; 1h 35min ago

Jan 27 07:47:13 np0005597077.novalocal systemd[1]: Reached target Cloud-init target.

○ cryptsetup-pre.target - Local Encrypted Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● cryptsetup.target - Local Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup.target; static)
     Active: active since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

● edpm_libvirt.target
     Loaded: loaded (/etc/systemd/system/edpm_libvirt.target; static)
     Active: active since Tue 2026-01-27 08:45:54 UTC; 37min ago
      Until: Tue 2026-01-27 08:45:54 UTC; 37min ago

Jan 27 08:45:54 compute-1 systemd[1]: Reached target edpm_libvirt.target.

○ emergency.target - Emergency Mode
     Loaded: loaded (/usr/lib/systemd/system/emergency.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ first-boot-complete.target - First Boot Complete
     Loaded: loaded (/usr/lib/systemd/system/first-boot-complete.target; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Jan 27 07:47:06 localhost systemd[1]: First Boot Complete was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ getty-pre.target - Preparation for Logins
     Loaded: loaded (/usr/lib/systemd/system/getty-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

● getty.target - Login Prompts
     Loaded: loaded (/usr/lib/systemd/system/getty.target; static)
     Active: active since Tue 2026-01-27 07:47:12 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:12 UTC; 1h 35min ago
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

Jan 27 07:47:12 np0005597077.novalocal systemd[1]: Reached target Login Prompts.

○ graphical.target - Graphical Interface
     Loaded: loaded (/usr/lib/systemd/system/graphical.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ initrd-fs.target - Initrd File Systems
     Loaded: loaded (/usr/lib/systemd/system/initrd-fs.target; static)
     Active: inactive (dead) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Jan 27 07:47:04 localhost systemd[1]: Reached target Initrd File Systems.

○ initrd-root-device.target - Initrd Root Device
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-device.target; static)
     Active: inactive (dead) since Tue 2026-01-27 07:47:05 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Jan 27 07:47:04 localhost systemd[1]: Reached target Initrd Root Device.
Jan 27 07:47:05 localhost systemd[1]: Stopped target Initrd Root Device.

○ initrd-root-fs.target - Initrd Root File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-fs.target; static)
     Active: inactive (dead) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Jan 27 07:47:04 localhost systemd[1]: Reached target Initrd Root File System.

○ initrd-switch-root.target - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.target; static)
     Active: inactive (dead) since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago

Jan 27 07:47:05 localhost systemd[1]: Reached target Switch Root.

○ initrd-usr-fs.target - Initrd /usr File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-usr-fs.target; static)
     Active: inactive (dead) since Tue 2026-01-27 07:47:05 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Jan 27 07:47:05 localhost systemd[1]: Stopped target Initrd /usr File System.
Notice: journal has been rotated since unit was started, output may be incomplete.

○ initrd.target - Initrd Default Target
     Loaded: loaded (/usr/lib/systemd/system/initrd.target; static)
     Active: inactive (dead) since Tue 2026-01-27 07:47:05 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Jan 27 07:47:04 localhost systemd[1]: Reached target Initrd Default Target.
Jan 27 07:47:05 localhost systemd[1]: Stopped target Initrd Default Target.

● integritysetup.target - Local Integrity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/integritysetup.target; static)
     Active: active since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

● local-fs-pre.target - Preparation for Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs-pre.target; static)
     Active: active since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Jan 27 07:47:06 localhost systemd[1]: Reached target Preparation for Local File Systems.

● local-fs.target - Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs.target; static)
     Active: active since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Jan 27 07:47:06 localhost systemd[1]: Reached target Local File Systems.

● multi-user.target - Multi-User System
     Loaded: loaded (/usr/lib/systemd/system/multi-user.target; indirect; preset: disabled)
     Active: active since Tue 2026-01-27 07:47:12 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:12 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Jan 27 07:47:12 np0005597077.novalocal systemd[1]: Reached target Multi-User System.

● network-online.target - Network is Online
     Loaded: loaded (/usr/lib/systemd/system/network-online.target; static)
     Active: active since Tue 2026-01-27 07:47:12 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:12 UTC; 1h 35min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Jan 27 07:47:12 np0005597077.novalocal systemd[1]: Reached target Network is Online.

● network-pre.target - Preparation for Network
     Loaded: loaded (/usr/lib/systemd/system/network-pre.target; static)
     Active: active since Tue 2026-01-27 07:47:09 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:09 UTC; 1h 35min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Jan 27 07:47:09 np0005597077.novalocal systemd[1]: Reached target Preparation for Network.

● network.target - Network
     Loaded: loaded (/usr/lib/systemd/system/network.target; static)
     Active: active since Tue 2026-01-27 07:47:09 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:09 UTC; 1h 35min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Jan 27 07:47:09 np0005597077.novalocal systemd[1]: Reached target Network.

● nfs-client.target - NFS client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-client.target; enabled; preset: disabled)
     Active: active since Tue 2026-01-27 07:47:09 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:09 UTC; 1h 35min ago

Jan 27 07:47:09 np0005597077.novalocal systemd[1]: Reached target NFS client services.

○ nss-lookup.target - Host and Network Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-lookup.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● nss-user-lookup.target - User and Group Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-user-lookup.target; static)
     Active: active since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Jan 27 07:47:07 localhost systemd[1]: Reached target User and Group Name Lookups.

● paths.target - Path Units
     Loaded: loaded (/usr/lib/systemd/system/paths.target; static)
     Active: active since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

○ remote-cryptsetup.target - Remote Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-cryptsetup.target; disabled; preset: enabled)
     Active: inactive (dead) since Tue 2026-01-27 07:47:05 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Jan 27 07:47:04 localhost systemd[1]: Reached target Remote Encrypted Volumes.
Jan 27 07:47:05 localhost systemd[1]: Stopped target Remote Encrypted Volumes.

● remote-fs-pre.target - Preparation for Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs-pre.target; static)
     Active: active since Tue 2026-01-27 07:47:09 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:09 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Jan 27 07:47:09 np0005597077.novalocal systemd[1]: Reached target Preparation for Remote File Systems.

● remote-fs.target - Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs.target; enabled; preset: enabled)
     Active: active since Tue 2026-01-27 07:47:09 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:09 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Jan 27 07:47:09 np0005597077.novalocal systemd[1]: Reached target Remote File Systems.

○ remote-veritysetup.target - Remote Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-veritysetup.target; disabled; preset: disabled)
Unit syslog.target could not be found.
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ rescue.target - Rescue Mode
     Loaded: loaded (/usr/lib/systemd/system/rescue.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● rpc_pipefs.target
     Loaded: loaded (/usr/lib/systemd/system/rpc_pipefs.target; static)
     Active: active since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago

● rpcbind.target - RPC Port Mapper
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.target; static)
     Active: active since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

○ shutdown.target - System Shutdown
     Loaded: loaded (/usr/lib/systemd/system/shutdown.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● slices.target - Slice Units
     Loaded: loaded (/usr/lib/systemd/system/slices.target; static)
     Active: active since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

● sockets.target - Socket Units
     Loaded: loaded (/usr/lib/systemd/system/sockets.target; static)
     Active: active since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Jan 27 07:47:07 localhost systemd[1]: Reached target Socket Units.

● sshd-keygen.target
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen.target; static)
     Active: active since Tue 2026-01-27 08:43:42 UTC; 39min ago
      Until: Tue 2026-01-27 08:43:42 UTC; 39min ago

Jan 27 08:43:42 compute-1 systemd[1]: Reached target sshd-keygen.target.

● swap.target - Swaps
     Loaded: loaded (/usr/lib/systemd/system/swap.target; static)
     Active: active since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

● sysinit.target - System Initialization
     Loaded: loaded (/usr/lib/systemd/system/sysinit.target; static)
     Active: active since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Jan 27 07:47:07 localhost systemd[1]: Reached target System Initialization.

● time-set.target - System Time Set
     Loaded: loaded (/usr/lib/systemd/system/time-set.target; static)
     Active: active since Tue 2026-01-27 08:29:14 UTC; 53min ago
      Until: Tue 2026-01-27 08:29:14 UTC; 53min ago
       Docs: man:systemd.special(7)

Jan 27 08:29:14 compute-1 systemd[1]: Reached target System Time Set.

● time-sync.target - System Time Synchronized
     Loaded: loaded (/usr/lib/systemd/system/time-sync.target; static)
     Active: active since Tue 2026-01-27 08:29:14 UTC; 53min ago
      Until: Tue 2026-01-27 08:29:14 UTC; 53min ago
       Docs: man:systemd.special(7)

Jan 27 08:29:14 compute-1 systemd[1]: Reached target System Time Synchronized.

● timers.target - Timer Units
     Loaded: loaded (/usr/lib/systemd/system/timers.target; static)
     Active: active since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

Jan 27 07:47:07 localhost systemd[1]: Reached target Timer Units.

○ umount.target - Unmount All Filesystems
     Loaded: loaded (/usr/lib/systemd/system/umount.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ veritysetup-pre.target - Local Verity Protected Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/veritysetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● veritysetup.target - Local Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/veritysetup.target; static)
     Active: active since Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:06 UTC; 1h 35min ago
       Docs: man:systemd.special(7)

○ virt-guest-shutdown.target - Libvirt guests shutdown
     Loaded: loaded (/etc/systemd/system/virt-guest-shutdown.target; static)
     Active: inactive (dead)
       Docs: https://libvirt.org

● d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1-309390b80c059bce.timer - /usr/bin/podman healthcheck run d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1
     Loaded: loaded (/run/systemd/transient/d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1-309390b80c059bce.timer; transient)
  Transient: yes
     Active: active (waiting) since Tue 2026-01-27 08:40:52 UTC; 42min ago
      Until: Tue 2026-01-27 08:40:52 UTC; 42min ago
    Trigger: Tue 2026-01-27 09:23:03 UTC; 4s left
   Triggers: ● d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1-309390b80c059bce.service

Jan 27 08:40:52 compute-1 systemd[1]: Started /usr/bin/podman healthcheck run d6ce9deaaecdb1627bbc6b1f3be04c03bb6975dd82af97ea8a1c73c9cca6abd1.

● dnf-makecache.timer - dnf makecache --timer
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.timer; enabled; preset: enabled)
     Active: active (waiting) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
    Trigger: Tue 2026-01-27 10:02:19 UTC; 39min left
   Triggers: ● dnf-makecache.service

Jan 27 07:47:07 localhost systemd[1]: Started dnf makecache --timer.

● f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d-183c56ee99fd276c.timer - /usr/bin/podman healthcheck run f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d
     Loaded: loaded (/run/systemd/transient/f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d-183c56ee99fd276c.timer; transient)
  Transient: yes
     Active: active (waiting) since Tue 2026-01-27 08:39:38 UTC; 43min ago
      Until: Tue 2026-01-27 08:39:38 UTC; 43min ago
    Trigger: Tue 2026-01-27 09:23:25 UTC; 26s left
   Triggers: ● f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d-183c56ee99fd276c.service

Jan 27 08:39:38 compute-1 systemd[1]: Started /usr/bin/podman healthcheck run f524c08bb2ae37ade38d0ec5ff5b0696715a77e37f29e059b5b993958b3ea20d.

● logrotate.timer - Daily rotation of log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.timer; enabled; preset: enabled)
     Active: active (waiting) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
    Trigger: Wed 2026-01-28 00:00:00 UTC; 14h left
   Triggers: ● logrotate.service
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

Jan 27 07:47:07 localhost systemd[1]: Started Daily rotation of log files.

○ raid-check.timer - Weekly RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.timer; enabled; preset: enabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● raid-check.service

○ sysstat-collect.timer - Run system activity accounting tool every 10 minutes
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-collect.service

○ sysstat-summary.timer - Generate summary of yesterday's process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-summary.service

● systemd-tmpfiles-clean.timer - Daily Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.timer; static)
     Active: active (waiting) since Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
      Until: Tue 2026-01-27 07:47:07 UTC; 1h 35min ago
    Trigger: Wed 2026-01-28 08:02:13 UTC; 22h left
   Triggers: ● systemd-tmpfiles-clean.service
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)

Jan 27 07:47:07 localhost systemd[1]: Started Daily Cleanup of Temporary Directories.

● unbound-anchor.timer - daily update of the root trust anchor for DNSSEC
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.timer; enabled; preset: enabled)
     Active: active (waiting) since Tue 2026-01-27 08:22:21 UTC; 1h 0min ago
      Until: Tue 2026-01-27 08:22:21 UTC; 1h 0min ago
    Trigger: Wed 2026-01-28 00:00:00 UTC; 14h left
   Triggers: ● unbound-anchor.service
       Docs: man:unbound-anchor(8)

Jan 27 08:22:21 compute-1 systemd[1]: Started daily update of the root trust anchor for DNSSEC.
