● compute-0
    State: running
    Units: 451 loaded (incl. loaded aliases)
     Jobs: 0 queued
   Failed: 0 units
    Since: Sat 2026-01-31 06:24:58 UTC; 3h 12min ago
  systemd: 252-64.el9
   CGroup: /
           ├─449424 turbostat --debug sleep 10
           ├─449429 sleep 10
           ├─init.scope
           │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
           ├─machine.slice
           │ ├─libpod-16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87.scope
           │ │ └─container
           │ │   ├─247401 dumb-init --single-child -- kolla_start
           │ │   ├─247403 /usr/bin/python3 /usr/bin/nova-compute
           │ │   ├─253078 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmpkn8xotgf/privsep.sock
           │ │   ├─254621 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmpuc93k029/privsep.sock
           │ │   └─307490 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmp1z21xzi6/privsep.sock
           │ ├─libpod-1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52.scope
           │ │ └─container
           │ │   ├─159731 dumb-init --single-child -- kolla_start
           │ │   ├─159734 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │ │   ├─159995 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │ │   ├─160056 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp07osgxqf/privsep.sock
           │ │   ├─253234 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpnmaxlr7e/privsep.sock
           │ │   └─253297 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpyqrzsz_e/privsep.sock
           │ └─libpod-d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613.scope
           │   └─container
           │     ├─149154 dumb-init --single-child -- kolla_start
           │     └─149157 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
           ├─system.slice
           │ ├─NetworkManager.service
           │ │ └─49013 /usr/sbin/NetworkManager --no-daemon
           │ ├─auditd.service
           │ │ ├─697 /sbin/auditd
           │ │ └─699 /usr/sbin/sedispatch
           │ ├─chronyd.service
           │ │ └─58578 /usr/sbin/chronyd -F 2
           │ ├─crond.service
           │ │ └─1002 /usr/sbin/crond -n
           │ ├─dbus-broker.service
           │ │ ├─791 /usr/bin/dbus-broker-launch --scope system --audit
           │ │ └─808 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
           │ ├─edpm_nova_compute.service
           │ │ └─247399 /usr/bin/conmon --api-version 1 -c 16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87 -u 16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87/userdata -p /run/containers/storage/overlay-containers/16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87
           │ ├─edpm_ovn_controller.service
           │ │ └─149152 /usr/bin/conmon --api-version 1 -c d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613 -u d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613/userdata -p /run/containers/storage/overlay-containers/d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613
           │ ├─edpm_ovn_metadata_agent.service
           │ │ └─159729 /usr/bin/conmon --api-version 1 -c 1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52 -u 1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52/userdata -p /run/containers/storage/overlay-containers/1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52
           │ ├─gssproxy.service
           │ │ └─871 /usr/sbin/gssproxy -D
           │ ├─irqbalance.service
           │ │ └─813 /usr/sbin/irqbalance
           │ ├─iscsid.service
           │ │ └─230940 /usr/sbin/iscsid -f
           │ ├─multipathd.service
           │ │ └─231100 /sbin/multipathd -d -s
           │ ├─ovs-vswitchd.service
           │ │ └─47313 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
           │ ├─ovsdb-server.service
           │ │ └─47231 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
           │ ├─polkit.service
           │ │ └─43497 /usr/lib/polkit-1/polkitd --no-debug
           │ ├─rpcbind.service
           │ │ └─695 /usr/bin/rpcbind -w -f
           │ ├─rsyslog.service
           │ │ └─998 /usr/sbin/rsyslogd -n
           │ ├─sshd.service
           │ │ └─186742 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
           │ ├─system-ceph\x2d2f5ab832\x2d5f2e\x2d5a84\x2dbd93\x2dcf8bab960ee2.slice
           │ │ ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@crash.compute-0.service
           │ │ │ ├─libpod-payload-287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44
           │ │ │ │ ├─81766 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
           │ │ │ │ └─81774 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
           │ │ │ └─runtime
           │ │ │   └─81764 /usr/bin/conmon --api-version 1 -c 287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44 -u 287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44/userdata -p /run/containers/storage/overlay-containers/287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44
           │ │ ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@haproxy.rgw.default.compute-0.evwczw.service
           │ │ │ ├─libpod-payload-e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe
           │ │ │ │ ├─96044 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
           │ │ │ │ ├─96046 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
           │ │ │ │ └─96048 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
           │ │ │ └─runtime
           │ │ │   └─96042 /usr/bin/conmon --api-version 1 -c e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe -u e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe/userdata -p /run/containers/storage/overlay-containers/e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-haproxy-rgw-default-compute-0-evwczw --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@haproxy.rgw.default.compute-0.evwczw.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe
           │ │ ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@keepalived.rgw.default.compute-0.wujrgc.service
           │ │ │ ├─libpod-payload-9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790
           │ │ │ │ ├─96458 /run/podman-init -- ./init.sh
           │ │ │ │ ├─96460 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
           │ │ │ │ └─96462 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
           │ │ │ └─runtime
           │ │ │   └─96456 /usr/bin/conmon --api-version 1 -c 9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790 -u 9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790/userdata -p /run/containers/storage/overlay-containers/9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-keepalived-rgw-default-compute-0-wujrgc --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@keepalived.rgw.default.compute-0.wujrgc.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790
           │ │ ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mds.cephfs.compute-0.jroeqh.service
           │ │ │ ├─libpod-payload-41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e
           │ │ │ │ ├─94916 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.jroeqh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─94918 /usr/bin/ceph-mds -n mds.cephfs.compute-0.jroeqh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─94914 /usr/bin/conmon --api-version 1 -c 41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e -u 41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e/userdata -p /run/containers/storage/overlay-containers/41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-mds-cephfs-compute-0-jroeqh --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mds.cephfs.compute-0.jroeqh.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e
           │ │ ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mgr.compute-0.ddmhwk.service
           │ │ │ ├─libpod-payload-27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583
           │ │ │ │ ├─74687 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.ddmhwk -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─74689 /usr/bin/ceph-mgr -n mgr.compute-0.ddmhwk -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─74685 /usr/bin/conmon --api-version 1 -c 27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583 -u 27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583/userdata -p /run/containers/storage/overlay-containers/27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-mgr-compute-0-ddmhwk --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mgr.compute-0.ddmhwk.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583
           │ │ ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mon.compute-0.service
           │ │ │ ├─libpod-payload-8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86
           │ │ │ │ ├─74392 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ │ └─74394 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─74390 /usr/bin/conmon --api-version 1 -c 8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86 -u 8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86/userdata -p /run/containers/storage/overlay-containers/8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86
           │ │ ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@osd.0.service
           │ │ │ ├─libpod-payload-af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1
           │ │ │ │ ├─84878 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─84880 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─84876 /usr/bin/conmon --api-version 1 -c af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1 -u af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1/userdata -p /run/containers/storage/overlay-containers/af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1
           │ │ └─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@rgw.rgw.compute-0.pnpmok.service
           │ │   ├─libpod-payload-3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9
           │ │   │ ├─94349 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.pnpmok -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │   │ └─94351 /usr/bin/radosgw -n client.rgw.rgw.compute-0.pnpmok -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │   └─runtime
           │ │     └─94347 /usr/bin/conmon --api-version 1 -c 3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9 -u 3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9/userdata -p /run/containers/storage/overlay-containers/3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-rgw-rgw-compute-0-pnpmok --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@rgw.rgw.compute-0.pnpmok.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9
           │ ├─system-getty.slice
           │ │ └─getty@tty1.service
           │ │   └─1003 /sbin/agetty -o "-p -- \\u" --noclear - linux
           │ ├─system-serial\x2dgetty.slice
           │ │ └─serial-getty@ttyS0.service
           │ │   └─1004 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
           │ ├─systemd-hostnamed.service
           │ │ └─444949 /usr/lib/systemd/systemd-hostnamed
           │ ├─systemd-journald.service
           │ │ └─674 /usr/lib/systemd/systemd-journald
           │ ├─systemd-logind.service
           │ │ └─818 /usr/lib/systemd/systemd-logind
           │ ├─systemd-machined.service
           │ │ └─212769 /usr/lib/systemd/systemd-machined
           │ ├─systemd-udevd.service
           │ │ └─udev
           │ │   └─725 /usr/lib/systemd/systemd-udevd
           │ ├─tuned.service
           │ │ └─108256 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
           │ ├─virtlogd.service
           │ │ └─212140 /usr/sbin/virtlogd
           │ ├─virtnodedevd.service
           │ │ └─247723 /usr/sbin/virtnodedevd --timeout 120
           │ ├─virtqemud.service
           │ │ └─247123 /usr/sbin/virtqemud --timeout 120
           │ └─virtsecretd.service
           │   └─253144 /usr/sbin/virtsecretd --timeout 120
           └─user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4518 /usr/bin/python3
             │ ├─session-78.scope
             │ │ ├─442577 "sshd-session: zuul [priv]"
             │ │ ├─442580 "sshd-session: zuul@notty"
             │ │ ├─442581 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
Unit boot.automount could not be found.
             │ │ ├─442605 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─449422 timeout 15s turbostat --debug sleep 10
             │ │ ├─449773 timeout 300s ceph osd dump --format json-pretty
             │ │ ├─449774 /usr/bin/python3 -s /usr/bin/ceph osd dump --format json-pretty
             │ │ ├─449775 timeout 300s semanage interface -l
             │ │ ├─449776 /usr/bin/python3 -EsI /usr/sbin/semanage interface -l
             │ │ ├─449779 timeout 300s systemctl status --all
             │ │ └─449781 systemctl status --all
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─20820 /usr/bin/dbus-broker-launch --scope user
             │   │   └─20831 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4306 /usr/lib/systemd/systemd --user
             │   │ └─4308 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-d63f6fad.scope
             │       └─20760 catatonit -P
             └─user-42477.slice
               ├─session-21.scope
               │ ├─76056 "sshd-session: ceph-admin [priv]"
               │ └─76078 "sshd-session: ceph-admin"
               ├─session-23.scope
               │ ├─76073 "sshd-session: ceph-admin [priv]"
               │ └─76079 "sshd-session: ceph-admin@notty"
               ├─session-24.scope
               │ ├─76130 "sshd-session: ceph-admin [priv]"
               │ └─76133 "sshd-session: ceph-admin@notty"
               ├─session-25.scope
               │ ├─76184 "sshd-session: ceph-admin [priv]"
               │ └─76187 "sshd-session: ceph-admin@notty"
               ├─session-26.scope
               │ ├─76238 "sshd-session: ceph-admin [priv]"
               │ └─76241 "sshd-session: ceph-admin@notty"
               ├─session-27.scope
               │ ├─76292 "sshd-session: ceph-admin [priv]"
               │ └─76295 "sshd-session: ceph-admin@notty"
               ├─session-28.scope
               │ ├─76346 "sshd-session: ceph-admin [priv]"
               │ └─76349 "sshd-session: ceph-admin@notty"
               ├─session-29.scope
               │ ├─76400 "sshd-session: ceph-admin [priv]"
               │ └─76403 "sshd-session: ceph-admin@notty"
               ├─session-30.scope
               │ ├─76454 "sshd-session: ceph-admin [priv]"
               │ └─76457 "sshd-session: ceph-admin@notty"
               ├─session-31.scope
               │ ├─76508 "sshd-session: ceph-admin [priv]"
               │ └─76511 "sshd-session: ceph-admin@notty"
               ├─session-32.scope
               │ ├─76535 "sshd-session: ceph-admin [priv]"
               │ └─76538 "sshd-session: ceph-admin@notty"
               ├─session-33.scope
               │ ├─76589 "sshd-session: ceph-admin [priv]"
               │ └─76592 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─76060 /usr/lib/systemd/systemd --user
                   └─76062 "(sd-pam)"

● proc-sys-fs-binfmt_misc.automount - Arbitrary Executable File Formats File System Automount Point
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.automount; static)
     Active: active (running) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
   Triggers: ● proc-sys-fs-binfmt_misc.mount
      Where: /proc/sys/fs/binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Jan 31 07:20:55 compute-0 systemd[1]: proc-sys-fs-binfmt_misc.automount: Got automount request for /proc/sys/fs/binfmt_misc, triggered by 77500 (sysctl)

● dev-cdrom.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ceph_vg0-ceph_lv0.device - /dev/ceph_vg0/ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:19:13 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:19:13 UTC; 2h 17min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2ddiskseq-1.device - /dev/disk/by-diskseq/1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2ddiskseq-3.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2ddiskseq-5.device - /dev/disk/by-diskseq/5
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:19:13 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:19:13 UTC; 2h 17min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2ddiskseq-6.device - /dev/disk/by-diskseq/6
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:19:13 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:19:13 UTC; 2h 17min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-ata\x2dQEMU_DVD\x2dROM_QM00001.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg0\x2dceph_lv0.device - /dev/disk/by-id/dm-name-ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:19:13 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:19:13 UTC; 2h 17min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2dIv7mxa8ZXtHuu3cqqU7qvSzYTOczksJrV17ZLe6JhHtcmOnjuAy31Pd8EnSx6fGh.device - /dev/disk/by-id/dm-uuid-LVM-Iv7mxa8ZXtHuu3cqqU7qvSzYTOczksJrV17ZLe6JhHtcmOnjuAy31Pd8EnSx6fGh
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:19:13 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:19:13 UTC; 2h 17min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dWI9xcg\x2db50E\x2df1rK\x2dywOK\x2d610u\x2dU47Q\x2dWBKBy9.device - /dev/disk/by-id/lvm-pv-uuid-WI9xcg-b50E-f1rK-ywOK-610u-U47Q-WBKBy9
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:19:13 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:19:13 UTC; 2h 17min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2dlabel-config\x2d2.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpartuuid-714daac1\x2d01.device - /dev/disk/by-partuuid/714daac1-01
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0.device - /dev/disk/by-path/pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0.device - /dev/disk/by-path/virtio-pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/virtio-pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2duuid-2026\x2d01\x2d31\x2d06\x2d24\x2d43\x2d00.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2duuid-822f14ea\x2d6e7e\x2d41df\x2db0d8\x2dfbe282d9ded8.device - /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

Jan 31 06:24:57 localhost systemd[1]: Found device /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8.

● dev-dm\x2d0.device - /dev/dm-0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:19:13 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:19:13 UTC; 2h 17min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-loop3.device - /dev/loop3
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:19:13 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:19:13 UTC; 2h 17min ago
     Device: /sys/devices/virtual/block/loop3

● dev-mapper-ceph_vg0\x2dceph_lv0.device - /dev/mapper/ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:19:13 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:19:13 UTC; 2h 17min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-rfkill.device - /dev/rfkill
    Follows: unit currently follows state of sys-devices-virtual-misc-rfkill.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:01 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:01 UTC; 3h 11min ago
     Device: /sys/devices/virtual/misc/rfkill

● dev-sr0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ttyS0.device - /dev/ttyS0
    Follows: unit currently follows state of sys-devices-pnp0-00:00-tty-ttyS0.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

● dev-ttyS1.device - /dev/ttyS1
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● dev-ttyS2.device - /dev/ttyS2
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS2.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● dev-ttyS3.device - /dev/ttyS3
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS3.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● dev-vda.device - /dev/vda
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-vda1.device - /dev/vda1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device - QEMU_DVD-ROM config-2
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● sys-devices-pci0000:00-0000:00:03.0-virtio1-net-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● sys-devices-pci0000:00-0000:00:07.0-virtio5-net-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:33:33 UTC; 3h 3min ago
      Until: Sat 2026-01-31 06:33:33 UTC; 3h 3min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-devices-platform-serial8250-tty-ttyS1.device - /sys/devices/platform/serial8250/tty/ttyS1
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● sys-devices-platform-serial8250-tty-ttyS2.device - /sys/devices/platform/serial8250/tty/ttyS2
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● sys-devices-platform-serial8250-tty-ttyS3.device - /sys/devices/platform/serial8250/tty/ttyS3
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● sys-devices-pnp0-00:00-tty-ttyS0.device - /sys/devices/pnp0/00:00/tty/ttyS0
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

● sys-devices-virtual-block-dm\x2d0.device - /sys/devices/virtual/block/dm-0
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:19:13 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:19:13 UTC; 2h 17min ago
     Device: /sys/devices/virtual/block/dm-0

● sys-devices-virtual-block-loop3.device - /sys/devices/virtual/block/loop3
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:19:13 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:19:13 UTC; 2h 17min ago
     Device: /sys/devices/virtual/block/loop3

● sys-devices-virtual-misc-rfkill.device - /sys/devices/virtual/misc/rfkill
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:01 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:01 UTC; 3h 11min ago
     Device: /sys/devices/virtual/misc/rfkill

● sys-devices-virtual-net-br\x2dex.device - /sys/devices/virtual/net/br-ex
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:15:59 UTC; 2h 21min ago
      Until: Sat 2026-01-31 07:15:59 UTC; 2h 21min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-devices-virtual-net-br\x2dint.device - /sys/devices/virtual/net/br-int
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:31:19 UTC; 2h 5min ago
      Until: Sat 2026-01-31 07:31:19 UTC; 2h 5min ago
     Device: /sys/devices/virtual/net/br-int

● sys-devices-virtual-net-genev_sys_6081.device - /sys/devices/virtual/net/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:31:19 UTC; 2h 5min ago
      Until: Sat 2026-01-31 07:31:19 UTC; 2h 5min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-devices-virtual-net-ovs\x2dsystem.device - /sys/devices/virtual/net/ovs-system
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:15:58 UTC; 2h 21min ago
      Until: Sat 2026-01-31 07:15:58 UTC; 2h 21min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-devices-virtual-net-vlan20.device - /sys/devices/virtual/net/vlan20
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:15:59 UTC; 2h 21min ago
      Until: Sat 2026-01-31 07:15:59 UTC; 2h 21min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-devices-virtual-net-vlan21.device - /sys/devices/virtual/net/vlan21
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:15:59 UTC; 2h 21min ago
      Until: Sat 2026-01-31 07:15:59 UTC; 2h 21min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-devices-virtual-net-vlan22.device - /sys/devices/virtual/net/vlan22
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:15:59 UTC; 2h 21min ago
      Until: Sat 2026-01-31 07:15:59 UTC; 2h 21min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-devices-virtual-net-vlan23.device - /sys/devices/virtual/net/vlan23
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:15:59 UTC; 2h 21min ago
      Until: Sat 2026-01-31 07:15:59 UTC; 2h 21min ago
     Device: /sys/devices/virtual/net/vlan23

● sys-module-configfs.device - /sys/module/configfs
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/module/configfs

● sys-module-fuse.device - /sys/module/fuse
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/module/fuse

● sys-subsystem-net-devices-br\x2dex.device - /sys/subsystem/net/devices/br-ex
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:15:59 UTC; 2h 21min ago
      Until: Sat 2026-01-31 07:15:59 UTC; 2h 21min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-subsystem-net-devices-br\x2dint.device - /sys/subsystem/net/devices/br-int
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:31:19 UTC; 2h 5min ago
      Until: Sat 2026-01-31 07:31:19 UTC; 2h 5min ago
     Device: /sys/devices/virtual/net/br-int

● sys-subsystem-net-devices-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-subsystem-net-devices-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 06:33:33 UTC; 3h 3min ago
      Until: Sat 2026-01-31 06:33:33 UTC; 3h 3min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-subsystem-net-devices-genev_sys_6081.device - /sys/subsystem/net/devices/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:31:19 UTC; 2h 5min ago
Unit boot.mount could not be found.
Unit home.mount could not be found.
      Until: Sat 2026-01-31 07:31:19 UTC; 2h 5min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-subsystem-net-devices-ovs\x2dsystem.device - /sys/subsystem/net/devices/ovs-system
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:15:58 UTC; 2h 21min ago
      Until: Sat 2026-01-31 07:15:58 UTC; 2h 21min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-subsystem-net-devices-vlan20.device - /sys/subsystem/net/devices/vlan20
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:15:59 UTC; 2h 21min ago
      Until: Sat 2026-01-31 07:15:59 UTC; 2h 21min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-subsystem-net-devices-vlan21.device - /sys/subsystem/net/devices/vlan21
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:15:59 UTC; 2h 21min ago
      Until: Sat 2026-01-31 07:15:59 UTC; 2h 21min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-subsystem-net-devices-vlan22.device - /sys/subsystem/net/devices/vlan22
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:15:59 UTC; 2h 21min ago
      Until: Sat 2026-01-31 07:15:59 UTC; 2h 21min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-subsystem-net-devices-vlan23.device - /sys/subsystem/net/devices/vlan23
     Loaded: loaded
     Active: active (plugged) since Sat 2026-01-31 07:15:59 UTC; 2h 21min ago
      Until: Sat 2026-01-31 07:15:59 UTC; 2h 21min ago
     Device: /sys/devices/virtual/net/vlan23

● -.mount - Root Mount
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted)
      Where: /
       What: /dev/vda1
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages.mount - Huge Pages File System
     Loaded: loaded (/usr/lib/systemd/system/dev-hugepages.mount; static)
     Active: active (mounted) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Where: /dev/hugepages
       What: hugetlbfs
       Docs: https://docs.kernel.org/admin-guide/mm/hugetlbpage.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 52.0K (peak: 556.0K)
        CPU: 4ms
     CGroup: /dev-hugepages.mount

● dev-hugepages1G.mount - /dev/hugepages1G
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Sat 2026-01-31 07:18:18 UTC; 2h 18min ago
      Until: Sat 2026-01-31 07:18:18 UTC; 2h 18min ago
      Where: /dev/hugepages1G
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages2M.mount - /dev/hugepages2M
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Sat 2026-01-31 07:18:18 UTC; 2h 18min ago
      Until: Sat 2026-01-31 07:18:18 UTC; 2h 18min ago
      Where: /dev/hugepages2M
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-mqueue.mount - POSIX Message Queue File System
     Loaded: loaded (/usr/lib/systemd/system/dev-mqueue.mount; static)
     Active: active (mounted) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Where: /dev/mqueue
       What: mqueue
       Docs: man:mq_overview(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 3ms
     CGroup: /dev-mqueue.mount

○ proc-fs-nfsd.mount - NFSD configuration filesystem
     Loaded: loaded (/usr/lib/systemd/system/proc-fs-nfsd.mount; static)
     Active: inactive (dead)
      Where: /proc/fs/nfsd
       What: nfsd

● proc-sys-fs-binfmt_misc.mount - Arbitrary Executable File Formats File System
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.mount; disabled; preset: disabled)
     Active: active (mounted) since Sat 2026-01-31 07:20:56 UTC; 2h 16min ago
      Until: Sat 2026-01-31 07:20:56 UTC; 2h 16min ago
TriggeredBy: ● proc-sys-fs-binfmt_misc.automount
      Where: /proc/sys/fs/binfmt_misc
       What: binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 8.0K (peak: 540.0K)
        CPU: 5ms
     CGroup: /proc-sys-fs-binfmt_misc.mount

Jan 31 07:20:56 compute-0 systemd[1]: Mounting Arbitrary Executable File Formats File System...
Jan 31 07:20:56 compute-0 systemd[1]: Mounted Arbitrary Executable File Formats File System.

● run-credentials-systemd\x2dsysctl.service.mount - /run/credentials/systemd-sysctl.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:14:40 UTC; 2h 22min ago
      Until: Sat 2026-01-31 07:14:40 UTC; 2h 22min ago
      Where: /run/credentials/systemd-sysctl.service
       What: none

● run-credentials-systemd\x2dsysusers.service.mount - /run/credentials/systemd-sysusers.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Where: /run/credentials/systemd-sysusers.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup.service.mount - /run/credentials/systemd-tmpfiles-setup.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Where: /run/credentials/systemd-tmpfiles-setup.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup\x2ddev.service.mount - /run/credentials/systemd-tmpfiles-setup-dev.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Where: /run/credentials/systemd-tmpfiles-setup-dev.service
       What: none

● run-netns.mount - /run/netns
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:17:44 UTC; 2h 19min ago
      Until: Sat 2026-01-31 07:17:44 UTC; 2h 19min ago
      Where: /run/netns
       What: tmpfs

● run-user-1000.mount - /run/user/1000
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 06:31:00 UTC; 3h 5min ago
      Until: Sat 2026-01-31 06:31:00 UTC; 3h 5min ago
      Where: /run/user/1000
       What: tmpfs

● run-user-42477.mount - /run/user/42477
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:20:43 UTC; 2h 16min ago
      Until: Sat 2026-01-31 07:20:43 UTC; 2h 16min ago
      Where: /run/user/42477
       What: tmpfs

● sys-fs-fuse-connections.mount - FUSE Control File System
     Loaded: loaded (/usr/lib/systemd/system/sys-fs-fuse-connections.mount; static)
     Active: active (mounted) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Where: /sys/fs/fuse/connections
       What: fusectl
       Docs: https://docs.kernel.org/filesystems/fuse.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 2ms
     CGroup: /sys-fs-fuse-connections.mount

● sys-kernel-config.mount - Kernel Configuration File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Where: /sys/kernel/config
       What: configfs
       Docs: https://docs.kernel.org/filesystems/configfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● sys-kernel-debug-tracing.mount - /sys/kernel/debug/tracing
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 09:20:12 UTC; 16min ago
Unit sysroot.mount could not be found.
      Until: Sat 2026-01-31 09:20:12 UTC; 16min ago
      Where: /sys/kernel/debug/tracing
       What: tracefs

● sys-kernel-debug.mount - Kernel Debug File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-debug.mount; static)
     Active: active (mounted) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Where: /sys/kernel/debug
       What: debugfs
       Docs: https://docs.kernel.org/filesystems/debugfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 1ms
     CGroup: /sys-kernel-debug.mount

● sys-kernel-tracing.mount - Kernel Trace File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-tracing.mount; static)
     Active: active (mounted) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Where: /sys/kernel/tracing
       What: tracefs
       Docs: https://docs.kernel.org/trace/ftrace.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 544.0K)
        CPU: 1ms
     CGroup: /sys-kernel-tracing.mount

○ tmp.mount - Temporary Directory /tmp
     Loaded: loaded (/usr/lib/systemd/system/tmp.mount; disabled; preset: disabled)
     Active: inactive (dead)
      Where: /tmp
       What: tmpfs
       Docs: https://systemd.io/TEMPORARY_DIRECTORIES
             man:file-hierarchy(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● var-lib-containers-storage-overlay-434702c433a94fed853448d3da05cbf9280c7e9f6751da3dd50abb345b6d0e73-merged.mount - /var/lib/containers/storage/overlay/434702c433a94fed853448d3da05cbf9280c7e9f6751da3dd50abb345b6d0e73/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:23:24 UTC; 2h 13min ago
      Until: Sat 2026-01-31 07:23:24 UTC; 2h 13min ago
      Where: /var/lib/containers/storage/overlay/434702c433a94fed853448d3da05cbf9280c7e9f6751da3dd50abb345b6d0e73/merged
       What: overlay

● var-lib-containers-storage-overlay-56776b456fbbd22c6ccc926ba646f22d38cc6cae41edd11817bf4fc2e9ec4db0-merged.mount - /var/lib/containers/storage/overlay/56776b456fbbd22c6ccc926ba646f22d38cc6cae41edd11817bf4fc2e9ec4db0/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:31:18 UTC; 2h 5min ago
      Until: Sat 2026-01-31 07:31:18 UTC; 2h 5min ago
      Where: /var/lib/containers/storage/overlay/56776b456fbbd22c6ccc926ba646f22d38cc6cae41edd11817bf4fc2e9ec4db0/merged
       What: overlay

● var-lib-containers-storage-overlay-5aa5d5fd38d4a5d883a1897d5f6ba37531cdc343e7f1449bd4f729cb9c6aba5a-merged.mount - /var/lib/containers/storage/overlay/5aa5d5fd38d4a5d883a1897d5f6ba37531cdc343e7f1449bd4f729cb9c6aba5a/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:19:50 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:19:50 UTC; 2h 17min ago
      Where: /var/lib/containers/storage/overlay/5aa5d5fd38d4a5d883a1897d5f6ba37531cdc343e7f1449bd4f729cb9c6aba5a/merged
       What: overlay

● var-lib-containers-storage-overlay-7b8c8448a93ae2267cb316f4191314782982849de7758266ed44098b2c37ed00-merged.mount - /var/lib/containers/storage/overlay/7b8c8448a93ae2267cb316f4191314782982849de7758266ed44098b2c37ed00/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:21:59 UTC; 2h 14min ago
      Until: Sat 2026-01-31 07:21:59 UTC; 2h 14min ago
      Where: /var/lib/containers/storage/overlay/7b8c8448a93ae2267cb316f4191314782982849de7758266ed44098b2c37ed00/merged
       What: overlay

● var-lib-containers-storage-overlay-8542ac82b5c85be570ed379312009f278265d22f19e187c58c78e043bbcbb587-merged.mount - /var/lib/containers/storage/overlay/8542ac82b5c85be570ed379312009f278265d22f19e187c58c78e043bbcbb587/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:21:14 UTC; 2h 15min ago
      Until: Sat 2026-01-31 07:21:14 UTC; 2h 15min ago
      Where: /var/lib/containers/storage/overlay/8542ac82b5c85be570ed379312009f278265d22f19e187c58c78e043bbcbb587/merged
       What: overlay

● var-lib-containers-storage-overlay-88e384f85db41244ef040f6e286250dda74f0b80329a42a697d92a582124d8ad-merged.mount - /var/lib/containers/storage/overlay/88e384f85db41244ef040f6e286250dda74f0b80329a42a697d92a582124d8ad/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:23:29 UTC; 2h 13min ago
      Until: Sat 2026-01-31 07:23:29 UTC; 2h 13min ago
      Where: /var/lib/containers/storage/overlay/88e384f85db41244ef040f6e286250dda74f0b80329a42a697d92a582124d8ad/merged
       What: overlay

● var-lib-containers-storage-overlay-a37d3e66217641a68a15506f8b733a614fa76e9f2714dda89488ea10d9ad13c5-merged.mount - /var/lib/containers/storage/overlay/a37d3e66217641a68a15506f8b733a614fa76e9f2714dda89488ea10d9ad13c5/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:23:20 UTC; 2h 13min ago
      Until: Sat 2026-01-31 07:23:20 UTC; 2h 13min ago
      Where: /var/lib/containers/storage/overlay/a37d3e66217641a68a15506f8b733a614fa76e9f2714dda89488ea10d9ad13c5/merged
       What: overlay

● var-lib-containers-storage-overlay-bd869002e8b8a3382f2de97b10877aa2b31a62c26e451dfbbaca94ae5ce6e9e9-merged.mount - /var/lib/containers/storage/overlay/bd869002e8b8a3382f2de97b10877aa2b31a62c26e451dfbbaca94ae5ce6e9e9/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:19:48 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:19:48 UTC; 2h 17min ago
      Where: /var/lib/containers/storage/overlay/bd869002e8b8a3382f2de97b10877aa2b31a62c26e451dfbbaca94ae5ce6e9e9/merged
       What: overlay

● var-lib-containers-storage-overlay-d5700937b5df4b0ada5a5a64acd02557c30327e3c17146608882ad64e4c9145c-merged.mount - /var/lib/containers/storage/overlay/d5700937b5df4b0ada5a5a64acd02557c30327e3c17146608882ad64e4c9145c/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:40:00 UTC; 1h 56min ago
      Until: Sat 2026-01-31 07:40:00 UTC; 1h 56min ago
      Where: /var/lib/containers/storage/overlay/d5700937b5df4b0ada5a5a64acd02557c30327e3c17146608882ad64e4c9145c/merged
       What: overlay

● var-lib-containers-storage-overlay-d957c390eafe5c7e51f1f2a29a3f2cfc4db809d1d248b9d6ce6764e2a4e8b0e3-merged.mount - /var/lib/containers/storage/overlay/d957c390eafe5c7e51f1f2a29a3f2cfc4db809d1d248b9d6ce6764e2a4e8b0e3/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:23:37 UTC; 2h 13min ago
      Until: Sat 2026-01-31 07:23:37 UTC; 2h 13min ago
      Where: /var/lib/containers/storage/overlay/d957c390eafe5c7e51f1f2a29a3f2cfc4db809d1d248b9d6ce6764e2a4e8b0e3/merged
       What: overlay

● var-lib-containers-storage-overlay-f9ac0e02931eacf6eed2f2c4c6b1921bb30c2aafd54d5acd018b9c101edb32e6-merged.mount - /var/lib/containers/storage/overlay/f9ac0e02931eacf6eed2f2c4c6b1921bb30c2aafd54d5acd018b9c101edb32e6/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:32:28 UTC; 2h 4min ago
      Until: Sat 2026-01-31 07:32:28 UTC; 2h 4min ago
      Where: /var/lib/containers/storage/overlay/f9ac0e02931eacf6eed2f2c4c6b1921bb30c2aafd54d5acd018b9c101edb32e6/merged
       What: overlay

● var-lib-containers-storage-overlay.mount - /var/lib/containers/storage/overlay
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:19:48 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:19:48 UTC; 2h 17min ago
      Where: /var/lib/containers/storage/overlay
       What: /dev/vda1

● var-lib-containers-storage-overlay\x2dcontainers-16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:39:59 UTC; 1h 56min ago
      Until: Sat 2026-01-31 07:39:59 UTC; 1h 56min ago
      Where: /var/lib/containers/storage/overlay-containers/16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:32:28 UTC; 2h 4min ago
      Until: Sat 2026-01-31 07:32:28 UTC; 2h 4min ago
      Where: /var/lib/containers/storage/overlay-containers/1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Sat 2026-01-31 07:31:18 UTC; 2h 5min ago
      Until: Sat 2026-01-31 07:31:18 UTC; 2h 5min ago
      Where: /var/lib/containers/storage/overlay-containers/d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613/userdata/shm
       What: shm

○ var-lib-machines.mount - Virtual Machine and Container Storage (Compatibility)
     Loaded: loaded (/usr/lib/systemd/system/var-lib-machines.mount; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 07:36:17 UTC; 2h 0min ago
      Where: /var/lib/machines
       What: /var/lib/machines.raw

Jan 31 07:36:17 compute-0 systemd[1]: Virtual Machine and Container Storage (Compatibility) was skipped because of an unmet condition check (ConditionPathExists=/var/lib/machines.raw).

● var-lib-nfs-rpc_pipefs.mount - RPC Pipe File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Where: /var/lib/nfs/rpc_pipefs
       What: rpc_pipefs

● systemd-ask-password-console.path - Dispatch Password Requests to Console Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.path; static)
     Active: active (waiting) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
   Triggers: ● systemd-ask-password-console.service
       Docs: man:systemd-ask-password-console.path(8)

● systemd-ask-password-wall.path - Forward Password Requests to Wall Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.path; static)
     Active: active (waiting) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
   Triggers: ● systemd-ask-password-wall.service
       Docs: man:systemd-ask-password-wall.path(8)

● init.scope - System and Service Manager
     Loaded: loaded
  Transient: yes
     Active: active (running) since Sat 2026-01-31 06:24:55 UTC; 3h 12min ago
       Docs: man:systemd(1)
         IO: 1016.0K read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 46.6M (peak: 66.3M)
        CPU: 1min 10.175s
     CGroup: /init.scope
             └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31

Jan 31 09:36:52 compute-0 systemd[1]: Started libpod-conmon-8c5a3ca73977d107763f6ed286ee477512d39185c4dce9219f645fb98284ffef.scope.
Jan 31 09:36:52 compute-0 systemd[1]: Started libcrun container.
Jan 31 09:36:52 compute-0 systemd[1]: libpod-8c5a3ca73977d107763f6ed286ee477512d39185c4dce9219f645fb98284ffef.scope: Deactivated successfully.
Jan 31 09:36:52 compute-0 systemd[1]: var-lib-containers-storage-overlay-2e5e6ad8884dbbeef499e54d58bcad7c5080f9ece4332a0f13d317225b91af9a-merged.mount: Deactivated successfully.
Jan 31 09:36:52 compute-0 systemd[1]: libpod-conmon-8c5a3ca73977d107763f6ed286ee477512d39185c4dce9219f645fb98284ffef.scope: Deactivated successfully.
Jan 31 09:36:53 compute-0 systemd[1]: Started libpod-conmon-52d0c0ac81b961a974a80e50ed2724c529d249c83767a41c57b0fe1b833d3e0d.scope.
Jan 31 09:36:53 compute-0 systemd[1]: Started libcrun container.
Jan 31 09:36:53 compute-0 systemd[1]: libpod-52d0c0ac81b961a974a80e50ed2724c529d249c83767a41c57b0fe1b833d3e0d.scope: Deactivated successfully.
Jan 31 09:36:53 compute-0 systemd[1]: var-lib-containers-storage-overlay-62330815f9f7a6a73d1693c90e625546c3e1011796b61560a5984393623fe9e2-merged.mount: Deactivated successfully.
Jan 31 09:36:54 compute-0 systemd[1]: libpod-conmon-52d0c0ac81b961a974a80e50ed2724c529d249c83767a41c57b0fe1b833d3e0d.scope: Deactivated successfully.

● libpod-16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:40:00 UTC; 1h 56min ago
         IO: 59.4M read, 85.0M written
      Tasks: 30 (limit: 4096)
     Memory: 792.5M (peak: 905.2M)
        CPU: 7min 42.648s
     CGroup: /machine.slice/libpod-16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87.scope
             └─container
               ├─247401 dumb-init --single-child -- kolla_start
               ├─247403 /usr/bin/python3 /usr/bin/nova-compute
               ├─253078 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmpkn8xotgf/privsep.sock
               ├─254621 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmpuc93k029/privsep.sock
               └─307490 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmp1z21xzi6/privsep.sock

Jan 31 07:40:00 compute-0 systemd[1]: Started libcrun container.

● libpod-1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52.scope.d
             └─dep.conf
     Active: active (running) since Sat 2026-01-31 07:32:28 UTC; 2h 4min ago
         IO: 23.2M read, 44.0M written
      Tasks: 11 (limit: 4096)
     Memory: 453.9M (peak: 518.5M)
        CPU: 1min 51.149s
     CGroup: /machine.slice/libpod-1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52.scope
             └─container
               ├─159731 dumb-init --single-child -- kolla_start
               ├─159734 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─159995 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─160056 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp07osgxqf/privsep.sock
               ├─253234 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpnmaxlr7e/privsep.sock
               └─253297 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpyqrzsz_e/privsep.sock

Jan 31 09:15:54 compute-0 podman[410408]: 2026-01-31 09:15:54.793585564 +0000 UTC m=+0.044196487 container died 589005cd4e32a0e86124a240e4b81a57c673ba8bad72c320e41849190a40512c (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-408ee2a7-16b5-490a-949b-0003daf995e7, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.build-date=20260127, org.label-schema.schema-version=1.0, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4)
Jan 31 09:15:54 compute-0 podman[410408]: 2026-01-31 09:15:54.847515295 +0000 UTC m=+0.098126198 container cleanup 589005cd4e32a0e86124a240e4b81a57c673ba8bad72c320e41849190a40512c (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-408ee2a7-16b5-490a-949b-0003daf995e7, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260127, org.label-schema.schema-version=1.0, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true)
Jan 31 09:15:54 compute-0 podman[410470]: 2026-01-31 09:15:54.908553002 +0000 UTC m=+0.045060603 container remove 589005cd4e32a0e86124a240e4b81a57c673ba8bad72c320e41849190a40512c (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-408ee2a7-16b5-490a-949b-0003daf995e7, org.label-schema.build-date=20260127, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3)
Jan 31 09:16:50 compute-0 podman[412192]: 2026-01-31 09:16:50.473381111 +0000 UTC m=+0.048609775 container create 40616c1ffbd14ad5e8e5b56a4fc5bb7598eb5515631ce9af83f37b5105953a3e (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-3f3cc872-5825-455b-b8f4-03469e3aacf8, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260127, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true)
Jan 31 09:16:50 compute-0 podman[412192]: 2026-01-31 09:16:50.544570097 +0000 UTC m=+0.119798791 container init 40616c1ffbd14ad5e8e5b56a4fc5bb7598eb5515631ce9af83f37b5105953a3e (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-3f3cc872-5825-455b-b8f4-03469e3aacf8, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true, io.buildah.version=1.41.3, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260127)
Jan 31 09:16:50 compute-0 podman[412192]: 2026-01-31 09:16:50.448847527 +0000 UTC m=+0.024076211 image pull 19964fda6b912d3d57e21b0bcc221725d936e513025030cb508474fe04b06af8 quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified
Jan 31 09:16:50 compute-0 podman[412192]: 2026-01-31 09:16:50.548812482 +0000 UTC m=+0.124041156 container start 40616c1ffbd14ad5e8e5b56a4fc5bb7598eb5515631ce9af83f37b5105953a3e (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-3f3cc872-5825-455b-b8f4-03469e3aacf8, org.label-schema.build-date=20260127, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true)
Jan 31 09:18:50 compute-0 podman[414742]: 2026-01-31 09:18:50.621023422 +0000 UTC m=+0.042875013 container died 40616c1ffbd14ad5e8e5b56a4fc5bb7598eb5515631ce9af83f37b5105953a3e (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-3f3cc872-5825-455b-b8f4-03469e3aacf8, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260127)
Jan 31 09:18:50 compute-0 podman[414742]: 2026-01-31 09:18:50.665080243 +0000 UTC m=+0.086931834 container cleanup 40616c1ffbd14ad5e8e5b56a4fc5bb7598eb5515631ce9af83f37b5105953a3e (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-3f3cc872-5825-455b-b8f4-03469e3aacf8, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260127, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, org.label-schema.vendor=CentOS)
Jan 31 09:18:50 compute-0 podman[414782]: 2026-01-31 09:18:50.729873998 +0000 UTC m=+0.046100376 container remove 40616c1ffbd14ad5e8e5b56a4fc5bb7598eb5515631ce9af83f37b5105953a3e (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-3f3cc872-5825-455b-b8f4-03469e3aacf8, tcib_managed=true, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, org.label-schema.build-date=20260127, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)

● libpod-d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613.scope.d
             └─dep.conf
     Active: active (running) since Sat 2026-01-31 07:31:18 UTC; 2h 5min ago
         IO: 7.6M read, 4.0K written
      Tasks: 6 (limit: 4096)
     Memory: 22.0M (peak: 25.8M)
        CPU: 24.293s
     CGroup: /machine.slice/libpod-d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613.scope
             └─container
               ├─149154 dumb-init --single-child -- kolla_start
               └─149157 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt

Jan 31 07:31:18 compute-0 systemd[1]: Started libcrun container.

● session-1.scope - Session 1 of User zuul
     Loaded: loaded (/run/systemd/transient/session-1.scope; transient)
  Transient: yes
     Active: active (abandoned) since Sat 2026-01-31 06:31:01 UTC; 3h 5min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 17.1M (peak: 38.9M)
        CPU: 1min 5.327s
     CGroup: /user.slice/user-1000.slice/session-1.scope
             └─4518 /usr/bin/python3

● session-21.scope - Session 21 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-21.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:20:44 UTC; 2h 16min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 1.7M)
        CPU: 198ms
     CGroup: /user.slice/user-42477.slice/session-21.scope
             ├─76056 "sshd-session: ceph-admin [priv]"
             └─76078 "sshd-session: ceph-admin"

Jan 31 07:20:44 compute-0 systemd[1]: Started Session 21 of User ceph-admin.

● session-23.scope - Session 23 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-23.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:20:44 UTC; 2h 16min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.4M)
        CPU: 296ms
     CGroup: /user.slice/user-42477.slice/session-23.scope
             ├─76073 "sshd-session: ceph-admin [priv]"
             └─76079 "sshd-session: ceph-admin@notty"

Jan 31 07:20:44 compute-0 systemd[1]: Started Session 23 of User ceph-admin.
Jan 31 07:20:44 compute-0 sudo[76080]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 31 07:20:44 compute-0 sudo[76080]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:20:44 compute-0 sudo[76080]: pam_unix(sudo:session): session closed for user root
Jan 31 07:20:44 compute-0 sudo[76105]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Jan 31 07:20:44 compute-0 sudo[76105]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:20:44 compute-0 sudo[76105]: pam_unix(sudo:session): session closed for user root

● session-24.scope - Session 24 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-24.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:20:44 UTC; 2h 16min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 5.9M)
        CPU: 414ms
     CGroup: /user.slice/user-42477.slice/session-24.scope
             ├─76130 "sshd-session: ceph-admin [priv]"
             └─76133 "sshd-session: ceph-admin@notty"

Jan 31 07:20:44 compute-0 systemd[1]: Started Session 24 of User ceph-admin.
Jan 31 07:20:44 compute-0 sudo[76134]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 31 07:20:44 compute-0 sudo[76134]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:20:44 compute-0 sudo[76134]: pam_unix(sudo:session): session closed for user root
Jan 31 07:20:44 compute-0 sudo[76159]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/2f5ab832-5f2e-5a84-bd93-cf8bab960ee2/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 check-host --expect-hostname compute-0
Jan 31 07:20:44 compute-0 sudo[76159]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:20:44 compute-0 sudo[76159]: pam_unix(sudo:session): session closed for user root

● session-25.scope - Session 25 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-25.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:20:44 UTC; 2h 16min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.2M)
        CPU: 297ms
     CGroup: /user.slice/user-42477.slice/session-25.scope
             ├─76184 "sshd-session: ceph-admin [priv]"
             └─76187 "sshd-session: ceph-admin@notty"

Jan 31 07:20:44 compute-0 systemd[1]: Started Session 25 of User ceph-admin.
Jan 31 07:20:44 compute-0 sudo[76188]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 31 07:20:44 compute-0 sudo[76188]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:20:44 compute-0 sudo[76188]: pam_unix(sudo:session): session closed for user root
Jan 31 07:20:45 compute-0 sudo[76213]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /var/lib/ceph/2f5ab832-5f2e-5a84-bd93-cf8bab960ee2/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d
Jan 31 07:20:45 compute-0 sudo[76213]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:20:45 compute-0 sudo[76213]: pam_unix(sudo:session): session closed for user root

● session-26.scope - Session 26 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-26.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:20:45 UTC; 2h 16min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.5M)
        CPU: 266ms
     CGroup: /user.slice/user-42477.slice/session-26.scope
             ├─76238 "sshd-session: ceph-admin [priv]"
             └─76241 "sshd-session: ceph-admin@notty"

Jan 31 07:20:45 compute-0 systemd[1]: Started Session 26 of User ceph-admin.
Jan 31 07:20:45 compute-0 sudo[76242]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 31 07:20:45 compute-0 sudo[76242]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:20:45 compute-0 sudo[76242]: pam_unix(sudo:session): session closed for user root
Jan 31 07:20:45 compute-0 sudo[76267]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /var/lib/ceph/2f5ab832-5f2e-5a84-bd93-cf8bab960ee2
Jan 31 07:20:45 compute-0 sudo[76267]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:20:45 compute-0 sudo[76267]: pam_unix(sudo:session): session closed for user root

● session-27.scope - Session 27 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-27.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:20:45 UTC; 2h 16min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.3M)
        CPU: 301ms
     CGroup: /user.slice/user-42477.slice/session-27.scope
             ├─76292 "sshd-session: ceph-admin [priv]"
             └─76295 "sshd-session: ceph-admin@notty"

Jan 31 07:20:45 compute-0 systemd[1]: Started Session 27 of User ceph-admin.
Jan 31 07:20:45 compute-0 sudo[76296]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 31 07:20:45 compute-0 sudo[76296]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:20:45 compute-0 sudo[76296]: pam_unix(sudo:session): session closed for user root
Jan 31 07:20:45 compute-0 sudo[76321]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /tmp/cephadm-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2/var/lib/ceph/2f5ab832-5f2e-5a84-bd93-cf8bab960ee2
Jan 31 07:20:45 compute-0 sudo[76321]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:20:45 compute-0 sudo[76321]: pam_unix(sudo:session): session closed for user root

● session-28.scope - Session 28 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-28.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:20:45 UTC; 2h 16min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.5M)
        CPU: 321ms
     CGroup: /user.slice/user-42477.slice/session-28.scope
             ├─76346 "sshd-session: ceph-admin [priv]"
             └─76349 "sshd-session: ceph-admin@notty"

Jan 31 07:20:45 compute-0 systemd[1]: Started Session 28 of User ceph-admin.
Jan 31 07:20:46 compute-0 sudo[76350]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 31 07:20:46 compute-0 sudo[76350]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:20:46 compute-0 sudo[76350]: pam_unix(sudo:session): session closed for user root
Jan 31 07:20:46 compute-0 sudo[76375]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/touch /tmp/cephadm-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2/var/lib/ceph/2f5ab832-5f2e-5a84-bd93-cf8bab960ee2/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new
Jan 31 07:20:46 compute-0 sudo[76375]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:20:46 compute-0 sudo[76375]: pam_unix(sudo:session): session closed for user root

● session-29.scope - Session 29 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-29.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:20:46 UTC; 2h 16min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.1M)
        CPU: 317ms
     CGroup: /user.slice/user-42477.slice/session-29.scope
             ├─76400 "sshd-session: ceph-admin [priv]"
             └─76403 "sshd-session: ceph-admin@notty"

Jan 31 07:20:46 compute-0 systemd[1]: Started Session 29 of User ceph-admin.
Jan 31 07:20:46 compute-0 sudo[76404]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 31 07:20:46 compute-0 sudo[76404]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:20:46 compute-0 sudo[76404]: pam_unix(sudo:session): session closed for user root
Jan 31 07:20:46 compute-0 sudo[76429]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chown -R ceph-admin /tmp/cephadm-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2
Jan 31 07:20:46 compute-0 sudo[76429]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:20:46 compute-0 sudo[76429]: pam_unix(sudo:session): session closed for user root

● session-30.scope - Session 30 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-30.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:20:46 UTC; 2h 16min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.5M)
        CPU: 271ms
     CGroup: /user.slice/user-42477.slice/session-30.scope
             ├─76454 "sshd-session: ceph-admin [priv]"
             └─76457 "sshd-session: ceph-admin@notty"

Jan 31 07:20:46 compute-0 systemd[1]: Started Session 30 of User ceph-admin.
Jan 31 07:20:46 compute-0 sudo[76458]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 31 07:20:46 compute-0 sudo[76458]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:20:46 compute-0 sudo[76458]: pam_unix(sudo:session): session closed for user root
Jan 31 07:20:46 compute-0 sudo[76483]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chmod 644 /tmp/cephadm-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2/var/lib/ceph/2f5ab832-5f2e-5a84-bd93-cf8bab960ee2/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new
Jan 31 07:20:46 compute-0 sudo[76483]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:20:46 compute-0 sudo[76483]: pam_unix(sudo:session): session closed for user root

● session-31.scope - Session 31 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-31.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:20:46 UTC; 2h 16min ago
         IO: 0B read, 216.0K written
      Tasks: 2
     Memory: 1.5M (peak: 3.4M)
        CPU: 262ms
     CGroup: /user.slice/user-42477.slice/session-31.scope
             ├─76508 "sshd-session: ceph-admin [priv]"
             └─76511 "sshd-session: ceph-admin@notty"

Jan 31 07:20:46 compute-0 systemd[1]: Started Session 31 of User ceph-admin.

● session-32.scope - Session 32 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-32.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:20:47 UTC; 2h 16min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.3M)
        CPU: 335ms
     CGroup: /user.slice/user-42477.slice/session-32.scope
             ├─76535 "sshd-session: ceph-admin [priv]"
             └─76538 "sshd-session: ceph-admin@notty"

Jan 31 07:20:47 compute-0 systemd[1]: Started Session 32 of User ceph-admin.
Jan 31 07:20:47 compute-0 sudo[76539]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 31 07:20:47 compute-0 sudo[76539]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:20:47 compute-0 sudo[76539]: pam_unix(sudo:session): session closed for user root
Jan 31 07:20:47 compute-0 sudo[76564]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mv /tmp/cephadm-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2/var/lib/ceph/2f5ab832-5f2e-5a84-bd93-cf8bab960ee2/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new /var/lib/ceph/2f5ab832-5f2e-5a84-bd93-cf8bab960ee2/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d
Jan 31 07:20:47 compute-0 sudo[76564]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 07:20:47 compute-0 sudo[76564]: pam_unix(sudo:session): session closed for user root

● session-33.scope - Session 33 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-33.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 07:20:47 UTC; 2h 16min ago
         IO: 224.0K read, 688.1M written
      Tasks: 2
     Memory: 277.3M (peak: 445.7M)
        CPU: 8min 15.775s
     CGroup: /user.slice/user-42477.slice/session-33.scope
             ├─76589 "sshd-session: ceph-admin [priv]"
             └─76592 "sshd-session: ceph-admin@notty"

Jan 31 09:36:53 compute-0 podman[448609]: 2026-01-31 09:36:53.199539136 +0000 UTC m=+0.125531881 container attach 52d0c0ac81b961a974a80e50ed2724c529d249c83767a41c57b0fe1b833d3e0d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_shannon, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9)
Jan 31 09:36:53 compute-0 podman[448609]: 2026-01-31 09:36:53.968702237 +0000 UTC m=+0.894694952 container died 52d0c0ac81b961a974a80e50ed2724c529d249c83767a41c57b0fe1b833d3e0d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_shannon, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, ceph=True, org.label-schema.license=GPLv2)
Jan 31 09:36:54 compute-0 podman[448609]: 2026-01-31 09:36:54.027626451 +0000 UTC m=+0.953619176 container remove 52d0c0ac81b961a974a80e50ed2724c529d249c83767a41c57b0fe1b833d3e0d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_shannon, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/)
Jan 31 09:36:54 compute-0 sudo[448282]: pam_unix(sudo:session): session closed for user root
Jan 31 09:36:54 compute-0 sudo[449069]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 31 09:36:54 compute-0 sudo[449069]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 09:36:54 compute-0 sudo[449069]: pam_unix(sudo:session): session closed for user root
Jan 31 09:36:54 compute-0 sudo[449113]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Jan 31 09:36:54 compute-0 sudo[449113]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 09:36:54 compute-0 sudo[449113]: pam_unix(sudo:session): session closed for user root

● session-78.scope - Session 78 of User zuul
     Loaded: loaded (/run/systemd/transient/session-78.scope; transient)
  Transient: yes
     Active: active (running) since Sat 2026-01-31 09:36:23 UTC; 36s ago
         IO: 3.0M read, 375.6M written
      Tasks: 19
     Memory: 611.9M (peak: 681.6M)
        CPU: 1min 46.108s
     CGroup: /user.slice/user-1000.slice/session-78.scope
             ├─442577 "sshd-session: zuul [priv]"
             ├─442580 "sshd-session: zuul@notty"
             ├─442581 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             ├─442605 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             ├─449422 timeout 15s turbostat --debug sleep 10
             ├─449779 timeout 300s systemctl status --all
             ├─449781 systemctl status --all
             ├─449800 timeout 300s semanage boolean -l
             ├─449801 /usr/bin/python3 -EsI /usr/sbin/semanage boolean -l
             ├─449804 timeout 300s ceph osd numa-status --format json-pretty
             └─449805 /usr/bin/python3 -s /usr/bin/ceph osd numa-status --format json-pretty

Jan 31 09:36:23 compute-0 systemd[1]: Started Session 78 of User zuul.
Jan 31 09:36:23 compute-0 sudo[442581]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Jan 31 09:36:23 compute-0 sudo[442581]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Jan 31 09:36:52 compute-0 ovs-appctl[448538]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Jan 31 09:36:52 compute-0 ovs-appctl[448545]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

○ 1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52-1a8f1cf84db28910.service - /usr/bin/podman healthcheck run 1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52
     Loaded: loaded (/run/systemd/transient/1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52-1a8f1cf84db28910.service; transient)
  Transient: yes
     Active: inactive (dead) since Sat 2026-01-31 09:36:50 UTC; 8s ago
   Duration: 116ms
TriggeredBy: ● 1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52-1a8f1cf84db28910.timer
    Process: 447309 ExecStart=/usr/bin/podman healthcheck run 1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52 (code=exited, status=0/SUCCESS)
   Main PID: 447309 (code=exited, status=0/SUCCESS)
        CPU: 72ms

Jan 31 09:36:50 compute-0 podman[447309]: 2026-01-31 09:36:50.747469282 +0000 UTC m=+0.099252465 container health_status 1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, config_Unit apparmor.service could not be found.
Unit apt-daily.service could not be found.
Unit auto-cpufreq.service could not be found.
Unit autofs.service could not be found.
data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': 'c9c07335481e70451acb503caf3b3b3a05811a07f9fde1e24aebece19089a266-25bdd24b66af043d77baba2a46a2d5dc0c63491fff70f82946d87e0106b3878e-25bdd24b66af043d77baba2a46a2d5dc0c63491fff70f82946d87e0106b3878e-25bdd24b66af043d77baba2a46a2d5dc0c63491fff70f82946d87e0106b3878e-0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/openstack/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, io.buildah.version=1.41.3, org.label-schema.build-date=20260127, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, tcib_managed=true, managed_by=edpm_ansible, 
org.label-schema.name=CentOS Stream 9 Base Image, container_name=ovn_metadata_agent, org.label-schema.schema-version=1.0)

● auditd.service - Security Auditing Service
     Loaded: loaded (/usr/lib/systemd/system/auditd.service; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:auditd(8)
             https://github.com/linux-audit/audit-documentation
   Main PID: 697 (auditd)
         IO: 4.0K read, 41.6M written
      Tasks: 4 (limit: 48560)
     Memory: 26.3M (peak: 26.8M)
        CPU: 8.330s
     CGroup: /system.slice/auditd.service
             ├─697 /sbin/auditd
             └─699 /usr/sbin/sedispatch

Jan 31 07:34:40 compute-0 auditd[697]: Audit daemon rotating log files
Jan 31 08:35:24 compute-0 auditd[697]: Audit daemon rotating log files

○ auth-rpcgss-module.service - Kernel Module supporting RPCSEC_GSS
     Loaded: loaded (/usr/lib/systemd/system/auth-rpcgss-module.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:24:59 UTC; 3h 12min ago

○ blk-availability.service - Availability of block devices
     Loaded: loaded (/usr/lib/systemd/system/blk-availability.service; disabled; preset: disabled)
     Active: inactive (dead)

● ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@crash.compute-0.service - Ceph crash.compute-0 for 2f5ab832-5f2e-5a84-bd93-cf8bab960ee2
     Loaded: loaded (/etc/systemd/system/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:21:14 UTC; 2h 15min ago
   Main PID: 81764 (conmon)
         IO: 0B read, 3.2M written
      Tasks: 3 (limit: 48560)
     Memory: 12.2M (peak: 33.4M)
        CPU: 761ms
     CGroup: /system.slice/system-ceph\x2d2f5ab832\x2d5f2e\x2d5a84\x2dbd93\x2dcf8bab960ee2.slice/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@crash.compute-0.service
             ├─libpod-payload-287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44
             │ ├─81766 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ └─81774 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             └─runtime
               └─81764 /usr/bin/conmon --api-version 1 -c 287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44 -u 287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44/userdata -p /run/containers/storage/overlay-containers/287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44

Jan 31 08:01:15 compute-0 ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-crash-compute-0[81764]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Jan 31 08:11:15 compute-0 ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-crash-compute-0[81764]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Jan 31 08:21:15 compute-0 ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-crash-compute-0[81764]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Jan 31 08:31:15 compute-0 ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-crash-compute-0[81764]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Jan 31 08:41:15 compute-0 ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-crash-compute-0[81764]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Jan 31 08:51:15 compute-0 ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-crash-compute-0[81764]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Jan 31 09:01:15 compute-0 ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-crash-compute-0[81764]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Jan 31 09:11:15 compute-0 ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-crash-compute-0[81764]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Jan 31 09:21:15 compute-0 ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-crash-compute-0[81764]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Jan 31 09:31:15 compute-0 ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-crash-compute-0[81764]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'

● ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@haproxy.rgw.default.compute-0.evwczw.service - Ceph haproxy.rgw.default.compute-0.evwczw for 2f5ab832-5f2e-5a84-bd93-cf8bab960ee2
     Loaded: loaded (/etc/systemd/system/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:23:29 UTC; 2h 13min ago
   Main PID: 96042 (conmon)
         IO: 1.2M read, 166.0K written
      Tasks: 11 (limit: 48560)
     Memory: 7.2M (peak: 23.4M)
        CPU: 10.569s
     CGroup: /system.slice/system-ceph\x2d2f5ab832\x2d5f2e\x2d5a84\x2dbd93\x2dcf8bab960ee2.slice/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@haproxy.rgw.default.compute-0.evwczw.service
             ├─libpod-payload-e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe
             │ ├─96044 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ ├─96046 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ └─96048 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             └─runtime
               └─96042 /usr/bin/conmon --api-version 1 -c e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe -u e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe/userdata -p /run/containers/storage/overlay-containers/e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-haproxy-rgw-default-compute-0-evwczw --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@haproxy.rgw.default.compute-0.evwczw.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe

Jan 31 07:23:29 compute-0 systemd[1]: Starting Ceph haproxy.rgw.default.compute-0.evwczw for 2f5ab832-5f2e-5a84-bd93-cf8bab960ee2...
Jan 31 07:23:29 compute-0 podman[96027]: 2026-01-31 07:23:29.854238835 +0000 UTC m=+0.045696181 container create e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe (image=quay.io/ceph/haproxy:2.3, name=ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-haproxy-rgw-default-compute-0-evwczw)
Jan 31 07:23:29 compute-0 podman[96027]: 2026-01-31 07:23:29.903219664 +0000 UTC m=+0.094677010 container init e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe (image=quay.io/ceph/haproxy:2.3, name=ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-haproxy-rgw-default-compute-0-evwczw)
Jan 31 07:23:29 compute-0 podman[96027]: 2026-01-31 07:23:29.907857638 +0000 UTC m=+0.099314974 container start e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe (image=quay.io/ceph/haproxy:2.3, name=ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-haproxy-rgw-default-compute-0-evwczw)
Jan 31 07:23:29 compute-0 bash[96027]: e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe
Jan 31 07:23:29 compute-0 podman[96027]: 2026-01-31 07:23:29.832535163 +0000 UTC m=+0.023992539 image pull e85424b0d443f37ddd2dd8a3bb2ef6f18dd352b987723a921b64289023af2914 quay.io/ceph/haproxy:2.3
Jan 31 07:23:29 compute-0 systemd[1]: Started Ceph haproxy.rgw.default.compute-0.evwczw for 2f5ab832-5f2e-5a84-bd93-cf8bab960ee2.
Jan 31 07:23:29 compute-0 ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-haproxy-rgw-default-compute-0-evwczw[96042]: [NOTICE] 030/072329 (2) : New worker #1 (4) forked

● ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@keepalived.rgw.default.compute-0.wujrgc.service - Ceph keepalived.rgw.default.compute-0.wujrgc for 2f5ab832-5f2e-5a84-bd93-cf8bab960ee2
     Loaded: loaded (/etc/systemd/system/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:23:37 UTC; 2h 13min ago
   Main PID: 96456 (conmon)
         IO: 0B read, 193.0K written
      Tasks: 4 (limit: 48560)
     Memory: 2.9M (peak: 22.9M)
        CPU: 27.327s
     CGroup: /system.slice/system-ceph\x2d2f5ab832\x2d5f2e\x2d5a84\x2dbd93\x2dcf8bab960ee2.slice/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@keepalived.rgw.default.compute-0.wujrgc.service
             ├─libpod-payload-9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790
             │ ├─96458 /run/podman-init -- ./init.sh
             │ ├─96460 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ └─96462 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             └─runtime
               └─96456 /usr/bin/conmon --api-version 1 -c 9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790 -u 9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790/userdata -p /run/containers/storage/overlay-containers/9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-keepalived-rgw-default-compute-0-wujrgc --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@keepalived.rgw.default.compute-0.wujrgc.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790

Jan 31 07:23:37 compute-0 ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-keepalived-rgw-default-compute-0-wujrgc[96456]: Sat Jan 31 07:23:37 2026: Command line: '/usr/sbin/keepalived' '-n' '-l' '-f' '/etc/keepalived/keepalived.conf'
Jan 31 07:23:37 compute-0 ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-keepalived-rgw-default-compute-0-wujrgc[96456]: Sat Jan 31 07:23:37 2026: Configuration file /etc/keepalived/keepalived.conf
Jan 31 07:23:37 compute-0 ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-keepalived-rgw-default-compute-0-wujrgc[96456]: Sat Jan 31 07:23:37 2026: NOTICE: setting config option max_auto_priority should result in better keepalived performance
Jan 31 07:23:37 compute-0 ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-keepalived-rgw-default-compute-0-wujrgc[96456]: Sat Jan 31 07:23:37 2026: Starting VRRP child process, pid=4
Jan 31 07:23:37 compute-0 systemd[1]: Started Ceph keepalived.rgw.default.compute-0.wujrgc for 2f5ab832-5f2e-5a84-bd93-cf8bab960ee2.
Jan 31 07:23:37 compute-0 ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-keepalived-rgw-default-compute-0-wujrgc[96456]: Sat Jan 31 07:23:37 2026: Startup complete
Jan 31 07:23:37 compute-0 ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-keepalived-rgw-default-compute-0-wujrgc[96456]: Sat Jan 31 07:23:37 2026: (VI_0) Entering BACKUP STATE (init)
Jan 31 07:23:37 compute-0 ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-keepalived-rgw-default-compute-0-wujrgc[96456]: Sat Jan 31 07:23:37 2026: VRRP_Script(check_backend) succeeded
Jan 31 07:23:40 compute-0 ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-keepalived-rgw-default-compute-0-wujrgc[96456]: Sat Jan 31 07:23:40 2026: (VI_0) Entering MASTER STATE
Jan 31 07:23:45 compute-0 ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-keepalived-rgw-default-compute-0-wujrgc[96456]: Sat Jan 31 07:23:45 2026: (VI_0) Received advert from 192.168.122.102 with lower priority 90, ours 100, forcing new election

● ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mds.cephfs.compute-0.jroeqh.service - Ceph mds.cephfs.compute-0.jroeqh for 2f5ab832-5f2e-5a84-bd93-cf8bab960ee2
     Loaded: loaded (/etc/systemd/system/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:23:24 UTC; 2h 13min ago
   Main PID: 94914 (conmon)
         IO: 0B read, 208.0K written
      Tasks: 18 (limit: 48560)
     Memory: 26.8M (peak: 27.3M)
        CPU: 3.554s
     CGroup: /system.slice/system-ceph\x2d2f5ab832\x2d5f2e\x2d5a84\x2dbd93\x2dcf8bab960ee2.slice/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mds.cephfs.compute-0.jroeqh.service
             ├─libpod-payload-41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e
             │ ├─94916 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.jroeqh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─94918 /usr/bin/ceph-mds -n mds.cephfs.compute-0.jroeqh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─94914 /usr/bin/conmon --api-version 1 -c 41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e -u 41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e/userdata -p /run/containers/storage/overlay-containers/41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-mds-cephfs-compute-0-jroeqh --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mds.cephfs.compute-0.jroeqh.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e

Jan 31 09:36:30 compute-0 ceph-mds[94918]: mds.cephfs.compute-0.jroeqh Can't run that command on an inactive MDS!
Jan 31 09:36:31 compute-0 ceph-mds[94918]: mds.cephfs.compute-0.jroeqh asok_command: dump_ops_in_flight {prefix=dump_ops_in_flight} (starting...)
Jan 31 09:36:31 compute-0 ceph-mds[94918]: mds.cephfs.compute-0.jroeqh Can't run that command on an inactive MDS!
Jan 31 09:36:31 compute-0 ceph-mds[94918]: mds.cephfs.compute-0.jroeqh asok_command: get subtrees {prefix=get subtrees} (starting...)
Jan 31 09:36:31 compute-0 ceph-mds[94918]: mds.cephfs.compute-0.jroeqh Can't run that command on an inactive MDS!
Jan 31 09:36:31 compute-0 ceph-mds[94918]: mds.cephfs.compute-0.jroeqh asok_command: ops {prefix=ops} (starting...)
Jan 31 09:36:31 compute-0 ceph-mds[94918]: mds.cephfs.compute-0.jroeqh Can't run that command on an inactive MDS!
Jan 31 09:36:32 compute-0 ceph-mds[94918]: mds.cephfs.compute-0.jroeqh asok_command: session ls {prefix=session ls} (starting...)
Jan 31 09:36:32 compute-0 ceph-mds[94918]: mds.cephfs.compute-0.jroeqh Can't run that command on an inactive MDS!
Jan 31 09:36:32 compute-0 ceph-mds[94918]: mds.cephfs.compute-0.jroeqh asok_command: status {prefix=status} (starting...)

● ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mgr.compute-0.ddmhwk.service - Ceph mgr.compute-0.ddmhwk for 2f5ab832-5f2e-5a84-bd93-cf8bab960ee2
     Loaded: loaded (/etc/systemd/system/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:19:50 UTC; 2h 17min ago
   Main PID: 74685 (conmon)
         IO: 0B read, 7.4M written
      Tasks: 149 (limit: 48560)
     Memory: 555.7M (peak: 559.6M)
        CPU: 3min 25.911s
     CGroup: /system.slice/system-ceph\x2d2f5ab832\x2d5f2e\x2d5a84\x2dbd93\x2dcf8bab960ee2.slice/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mgr.compute-0.ddmhwk.service
             ├─libpod-payload-27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583
             │ ├─74687 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.ddmhwk -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─74689 /usr/bin/ceph-mgr -n mgr.compute-0.ddmhwk -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─74685 /usr/bin/conmon --api-version 1 -c 27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583 -u 27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583/userdata -p /run/containers/storage/overlay-containers/27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-mgr-compute-0-ddmhwk --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mgr.compute-0.ddmhwk.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583

Jan 31 09:36:56 compute-0 ceph-mgr[74689]: log_channel(audit) log [DBG] : from='client.43479 -' entity='client.admin' cmd=[{"prefix": "fs status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Jan 31 09:36:56 compute-0 ceph-mgr[74689]: log_channel(audit) log [DBG] : from='client.45998 -' entity='client.admin' cmd=[{"prefix": "device ls", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Jan 31 09:36:57 compute-0 ceph-mgr[74689]: log_channel(audit) log [DBG] : from='client.46019 -' entity='client.admin' cmd=[{"prefix": "fs status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Jan 31 09:36:58 compute-0 ceph-mgr[74689]: log_channel(audit) log [DBG] : from='client.43512 -' entity='client.admin' cmd=[{"prefix": "osd blocked-by", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Jan 31 09:36:58 compute-0 ceph-mgr[74689]: log_channel(cluster) log [DBG] : pgmap v4515: 305 pgs: 305 active+clean; 120 MiB data, 1.6 GiB used, 19 GiB / 21 GiB avail
Jan 31 09:36:58 compute-0 ceph-mgr[74689]: log_channel(audit) log [DBG] : from='client.43527 -' entity='client.admin' cmd=[{"prefix": "osd df", "output_method": "tree", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Jan 31 09:36:58 compute-0 ceph-mgr[74689]: log_channel(audit) log [DBG] : from='client.46037 -' entity='client.admin' cmd=[{"prefix": "osd blocked-by", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Jan 31 09:36:59 compute-0 ceph-mgr[74689]: log_channel(audit) log [DBG] : from='client.53218 -' entity='client.admin' cmd=[{"prefix": "fs status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Jan 31 09:36:59 compute-0 ceph-mgr[74689]: log_channel(audit) log [DBG] : from='client.43539 -' entity='client.admin' cmd=[{"prefix": "osd df", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Jan 31 09:36:59 compute-0 ceph-mgr[74689]: log_channel(audit) log [DBG] : from='client.46052 -' entity='client.admin' cmd=[{"prefix": "osd df", "output_method": "tree", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch

● ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mon.compute-0.service - Ceph mon.compute-0 for 2f5ab832-5f2e-5a84-bd93-cf8bab960ee2
     Loaded: loaded (/etc/systemd/system/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:19:48 UTC; 2h 17min ago
   Main PID: 74390 (conmon)
         IO: 1.9M read, 1.2G written
      Tasks: 27 (limit: 48560)
     Memory: 183.5M (peak: 200.3M)
        CPU: 2min 28.376s
     CGroup: /system.slice/system-ceph\x2d2f5ab832\x2d5f2e\x2d5a84\x2dbd93\x2dcf8bab960ee2.slice/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mon.compute-0.service
             ├─libpod-payload-8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86
             │ ├─74392 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─74394 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             └─runtime
               └─74390 /usr/bin/conmon --api-version 1 -c 8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86 -u 8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86/userdata -p /run/containers/storage/overlay-containers/8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86

Jan 31 09:36:58 compute-0 ceph-mon[74394]: from='client.? 192.168.122.101:0/662448864' entity='client.admin' cmd=[{"prefix": "fs ls", "format": "json-pretty"}]: dispatch
Jan 31 09:36:58 compute-0 ceph-mon[74394]: from='client.? 192.168.122.102:0/2406645576' entity='client.admin' cmd=[{"prefix": "df", "format": "json-pretty"}]: dispatch
Jan 31 09:36:58 compute-0 ceph-mon[74394]: from='client.? 192.168.122.100:0/3837907227' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json-pretty"}]: dispatch
Jan 31 09:36:58 compute-0 ceph-mon[74394]: from='client.? 192.168.122.101:0/3656307438' entity='client.admin' cmd=[{"prefix": "mds stat", "format": "json-pretty"}]: dispatch
Jan 31 09:36:58 compute-0 ceph-mon[74394]: from='client.? 192.168.122.102:0/212475397' entity='client.admin' cmd=[{"prefix": "fs dump", "format": "json-pretty"}]: dispatch
Jan 31 09:36:58 compute-0 ceph-mon[74394]: mon.compute-0@0(leader) e3 handle_command mon_command({"prefix": "osd blocklist ls", "format": "json-pretty"} v 0) v1
Jan 31 09:36:58 compute-0 ceph-mon[74394]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/4105510240' entity='client.admin' cmd=[{"prefix": "osd blocklist ls", "format": "json-pretty"}]: dispatch
Jan 31 09:36:58 compute-0 ceph-mon[74394]: mon.compute-0@0(leader).osd e434 _set_new_cache_sizes cache_size:1020054731 inc_alloc: 343932928 full_alloc: 348127232 kv_alloc: 318767104
Jan 31 09:36:59 compute-0 ceph-mon[74394]: mon.compute-0@0(leader) e3 handle_command mon_command({"prefix": "osd dump", "format": "json-pretty"} v 0) v1
Jan 31 09:36:59 compute-0 ceph-mon[74394]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/2316675257' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json-pretty"}]: dispatch

● ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@osd.0.service - Ceph osd.0 for 2f5ab832-5f2e-5a84-bd93-cf8bab960ee2
     Loaded: loaded (/etc/systemd/system/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:21:59 UTC; 2h 14min ago
   Main PID: 84876 (conmon)
         IO: 3.4G read, 10.7G written
      Tasks: 60 (limit: 48560)
     Memory: 1.3G (peak: 1.6G)
        CPU: 2min 5.562s
     CGroup: /system.slice/system-ceph\x2d2f5ab832\x2d5f2e\x2d5a84\x2dbd93\x2dcf8bab960ee2.slice/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@osd.0.service
             ├─libpod-payload-af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1
             │ ├─84878 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─84880 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─84876 /usr/bin/conmon --api-version 1 -c af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1 -u af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1/userdata -p /run/containers/storage/overlay-containers/af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1

Jan 31 09:36:35 compute-0 ceph-osd[84880]: do_command 'counter schema' '{prefix=counter schema}' result is 0 bytes
Jan 31 09:36:35 compute-0 ceph-osd[84880]: prioritycache tune_memory target: 4294967296 mapped: 416350208 unmapped: 75046912 heap: 491397120 old mem: 2845415832 new mem: 2845415832
Jan 31 09:36:35 compute-0 ceph-osd[84880]: monclient: tick
Jan 31 09:36:35 compute-0 ceph-osd[84880]: monclient: _check_auth_tickets
Jan 31 09:36:35 compute-0 ceph-osd[84880]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-01-31T09:36:04.343652+0000)
Jan 31 09:36:35 compute-0 ceph-osd[84880]: prioritycache tune_memory target: 4294967296 mapped: 416350208 unmapped: 75046912 heap: 491397120 old mem: 2845415832 new mem: 2845415832
Jan 31 09:36:35 compute-0 ceph-osd[84880]: monclient: tick
Jan 31 09:36:35 compute-0 ceph-osd[84880]: monclient: _check_auth_tickets
Jan 31 09:36:35 compute-0 ceph-osd[84880]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-01-31T09:36:05.343787+0000)
Jan 31 09:36:35 compute-0 ceph-osd[84880]: do_command 'log dump' '{prefix=log dump}'

● ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@rgw.rgw.compute-0.pnpmok.service - Ceph rgw.rgw.compute-0.pnpmok for 2f5ab832-5f2e-5a84-bd93-cf8bab960ee2
     Loaded: loaded (/etc/systemd/system/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:23:20 UTC; 2h 13min ago
   Main PID: 94347 (conmon)
         IO: 0B read, 8.6M written
      Tasks: 605 (limit: 48560)
     Memory: 126.5M (peak: 127.5M)
        CPU: 52.561s
     CGroup: /system.slice/system-ceph\x2d2f5ab832\x2d5f2e\x2d5a84\x2dbd93\x2dcf8bab960ee2.slice/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@rgw.rgw.compute-0.pnpmok.service
             ├─libpod-payload-3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9
             │ ├─94349 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.pnpmok -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─94351 /usr/bin/radosgw -n client.rgw.rgw.compute-0.pnpmok -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─94347 /usr/bin/conmon --api-version 1 -c 3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9 -u 3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9/userdata -p /run/containers/storage/overlay-containers/3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-rgw-rgw-compute-0-pnpmok --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@rgw.rgw.compute-0.pnpmok.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9

Jan 31 09:36:55 compute-0 radosgw[94351]: beast: 0x7fb0bb49a6f0: 192.168.122.100 - anonymous [31/Jan/2026:09:36:55.800 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.001000032s
Jan 31 09:36:56 compute-0 radosgw[94351]: ====== starting new request req=0x7fb0bb49a6f0 =====
Jan 31 09:36:56 compute-0 radosgw[94351]: ====== req done req=0x7fb0bb49a6f0 op status=0 http_status=200 latency=0.000000000s ======
Jan 31 09:36:56 compute-0 radosgw[94351]: beast: 0x7fb0bb49a6f0: 192.168.122.102 - anonymous [31/Jan/2026:09:36:56.458 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.000000000s
Jan 31 09:36:57 compute-0 radosgw[94351]: ====== starting new request req=0x7fb0bb49a6f0 =====
Jan 31 09:36:57 compute-0 radosgw[94351]: ====== req done req=0x7fb0bb49a6f0 op status=0 http_status=200 latency=0.001000032s ======
Jan 31 09:36:57 compute-0 radosgw[94351]: beast: 0x7fb0bb49a6f0: 192.168.122.100 - anonymous [31/Jan/2026:09:36:57.802 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.001000032s
Jan 31 09:36:58 compute-0 radosgw[94351]: ====== starting new request req=0x7fb0bb49a6f0 =====
Jan 31 09:36:58 compute-0 radosgw[94351]: ====== req done req=0x7fb0bb49a6f0 op status=0 http_status=200 latency=0.000000000s ======
Jan 31 09:36:58 compute-0 radosgw[94351]: beast: 0x7fb0bb49a6f0: 192.168.122.102 - anonymous [31/Jan/2026:09:36:58.460 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.000000000s

● ceph-osd-losetup-0.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-0.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2026-01-31 07:19:15 UTC; 2h 17min ago
   Main PID: 72541 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Jan 31 07:19:15 compute-0 systemd[1]: Starting Ceph OSD losetup...
Jan 31 07:19:15 compute-0 bash[72542]: /dev/loop3: [64513]:4355666 (/var/lib/ceph-osd-0.img)
Jan 31 07:19:15 compute-0 systemd[1]: Finished Ceph OSD losetup.

● chronyd.service - NTP client/server
     Loaded: loaded (/usr/lib/systemd/system/chronyd.service; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 07:17:13 UTC; 2h 19min ago
       Docs: man:chronyd(8)
             man:chrony.conf(5)
   Main PID: 58578 (chronyd)
         IO: 0B read, 8.0K written
      Tasks: 1 (limit: 48560)
     Memory: 1.0M (peak: 1.9M)
        CPU: 62ms
     CGroup: /system.slice/chronyd.service
             └─58578 /usr/sbin/chronyd -F 2

Jan 31 07:17:13 compute-0 systemd[1]: Starting NTP client/server...
Jan 31 07:17:13 compute-0 chronyd[58578]: chronyd version 4.8 starting (+CMDMON +REFCLOCK +RTC +PRIVDROP +SCFILTER +SIGND +NTS +SECHASH +IPV6 +DEBUG)
Jan 31 07:17:13 compute-0 chronyd[58578]: Frequency -31.982 +/- 0.094 ppm read from /var/lib/chrony/drift
Jan 31 07:17:13 compute-0 chronyd[58578]: Loaded seccomp filter (level 2)
Jan 31 07:17:13 compute-0 systemd[1]: Started NTP client/server.
Jan 31 07:19:22 compute-0 chronyd[58578]: Selected source 23.133.168.245 (pool.ntp.org)

● cloud-config.service - Cloud-init: Config Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2026-01-31 06:25:03 UTC; 3h 11min ago
   Main PID: 995 (code=exited, status=0/SUCCESS)
        CPU: 369ms

● cloud-final.service - Cloud-init: Final Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-final.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2026-01-31 06:25:03 UTC; 3h 11min ago
   Main PID: 1214 (code=exited, status=0/SUCCESS)
        CPU: 403ms

● cloud-init-local.service - Cloud-init: Local Stage (pre-network)
     Loaded: loaded (/usr/lib/systemd/system/cloud-init-local.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2026-01-31 06:25:01 UTC; 3h 11min ago
   Main PID: 810 (code=exited, status=0/SUCCESS)
        CPU: 656ms

● cloud-init.service - Cloud-init: Network Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2026-01-31 06:25:03 UTC; 3h 11min ago
   Main PID: 884 (code=exited, status=0/SUCCESS)
        CPU: 902ms

○ cpupower.service - Configure CPU power related settings
     Loaded: loaded (/usr/lib/systemd/system/cpupower.service; disabled; preset: disabled)
     Active: inactive (dead)

● crond.service - Command Scheduler
     Loaded: loaded (/usr/lib/systemd/system/crond.service; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 06:25:03 UTC; 3h 11min ago
   Main PID: 1002 (crond)
         IO: 160.0K read, 12.0K written
      Tasks: 1 (limit: 48560)
     Memory: 1.2M (peak: 5.0M)
        CPU: 227ms
     CGroup: /system.slice/crond.service
             └─1002 /usr/sbin/crond -n

Jan 31 07:31:01 compute-0 anacron[30939]: Job `cron.daily' started
Jan 31 07:31:01 compute-0 anacron[30939]: Job `cron.daily' terminated
Jan 31 07:51:01 compute-0 anacron[30939]: Job `cron.weekly' started
Jan 31 07:51:01 compute-0 anacron[30939]: Job `cron.weekly' terminated
Jan 31 08:01:01 compute-0 CROND[280490]: (root) CMD (run-parts /etc/cron.hourly)
Jan 31 08:11:01 compute-0 anacron[30939]: Job `cron.monthly' started
Jan 31 08:11:01 compute-0 anacron[30939]: Job `cron.monthly' terminated
Jan 31 08:11:01 compute-0 anacron[30939]: Normal exit (3 jobs run)
Jan 31 09:01:01 compute-0 CROND[388417]: (root) CMD (run-parts /etc/cron.hourly)
Jan 31 09:01:01 compute-0 CROND[388416]: (root) CMDEND (run-parts /etc/cron.hourly)

○ d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613-280d62f938195e00.service - /usr/bin/podman healthcheck run d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613
     Loaded: loaded (/run/systemd/transient/d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613-280d62f938195e00.service; transient)
  Transient: yes
     Active: inactive (dead) since Sat 2026-01-31 09:36:50 UTC; 8s ago
   Duration: 117ms
TriggeredBy: ● d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613-280d62f938195e00.timer
    Process: 447313 ExecStart=/usr/bin/podman healthcheck run d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613 (code=exited, status=0/SUCCESS)
   Main PID: 447313 (code=exited, status=0/SUCCESS)
        CPU: 72ms

Jan 31 09:36:50 compute-0 podman[447313]: 2026-01-31 09:36:50.754547364 +0000 UTC m=+0.106313286 container health_status d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, managed_by=edpm_ansible, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, container_name=ovn_controller, org.label-schema.build-date=20260127, config_data=
{'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': 'c9c07335481e70451acb503caf3b3b3a05811a07f9fde1e24aebece19089a266-25bdd24b66af043d77baba2a46a2d5dc0c63491fff70f82946d87e0106b3878e-25bdd24b66af043d77baba2a46a2d5dc0c63491fff70f82946d87e0106b3878e-25bdd24b66af043d77baba2a46a2d5dc0c63491fff70f82946d87e0106b3878e'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller)

● dbus-broker.service - D-Bus System Message Bus
     Loaded: loaded (/usr/lib/systemd/system/dbus-broker.service; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
TriggeredBy: ● dbus.socket
       Docs: man:dbus-broker-launch(1)
   Main PID: 791 (dbus-broker-lau)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48560)
     Memory: 2.9M (peak: 3.7M)
        CPU: 9.121s
     CGroup: /system.slice/dbus-broker.service
             ├─791 /usr/bin/dbus-broker-launch --scope system --audit
             └─808 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit

Jan 31 07:14:22 compute-0 dbus-broker-launch[791]: Noticed file-system modification, trigger reload.
Jan 31 07:15:08 compute-0 dbus-broker-launch[808]: avc:  op=load_policy lsm=selinux seqno=7 res=1
Jan 31 07:15:23 compute-0 dbus-broker-launch[808]: avc:  op=load_policy lsm=selinux seqno=8 res=1
Jan 31 07:30:23 compute-0 dbus-broker-launch[808]: avc:  op=load_policy lsm=selinux seqno=9 res=1
Jan 31 07:33:45 compute-0 dbus-broker-launch[808]: avc:  op=load_policy lsm=selinux seqno=10 res=1
Jan 31 07:33:52 compute-0 dbus-broker-launch[808]: avc:  op=load_policy lsm=selinux seqno=11 res=1
Jan 31 07:34:30 compute-0 dbus-broker-launch[808]: avc:  op=load_policy lsm=selinux seqno=12 res=1
Jan 31 07:34:31 compute-0 dbus-broker-launch[791]: Noticed file-system modification, trigger reload.
Jan 31 07:34:31 compute-0 dbus-broker-launch[791]: Noticed file-system modification, trigger reload.
Jan 31 07:36:06 compute-0 dbus-broker-launch[808]: avc:  op=load_policy lsm=selinux seqno=13 res=1

○ dm-event.service - Device-mapper event daemon
     Loaded: loaded (/usr/lib/systemd/system/dm-event.service; static)
     Active: inactive (dead)
TriggeredBy: ● dm-event.socket
       Docs: man:dmeventd(8)

○ dnf-makecache.service - dnf makecache
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.service; static)
     Active: inactive (dead) since Sat 2026-01-31 08:36:21 UTC; 1h 0min ago
TriggeredBy: ● dnf-makecache.timer
    Process: 345135 ExecStart=/usr/bin/dnf makecache --timer (code=exited, status=0/SUCCESS)
   Main PID: 345135 (code=exited, status=0/SUCCESS)
        CPU: 259ms

Jan 31 08:36:21 compute-0 systemd[1]: Starting dnf makecache...
Jan 31 08:36:21 compute-0 dnf[345135]: Metadata cache refreshed recently.
Jan 31 08:36:21 compute-0 systemd[1]: dnf-makecache.service: Deactivated successfully.
Jan 31 08:36:21 compute-0 systemd[1]: Finished dnf makecache.

○ dracut-cmdline.service - dracut cmdline hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-cmdline.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:24:58 UTC; 3h 12min ago
   Duration: 1.442s
       Docs: man:dracut-cmdline.service(8)
             man:dracut.bootup(7)
   Main PID: 324 (code=exited, status=0/SUCCESS)
        CPU: 89ms

Jan 31 06:24:56 localhost systemd[1]: Starting dracut cmdline hook...
Jan 31 06:24:56 localhost dracut-cmdline[324]: dracut-9 dracut-057-102.git20250818.el9
Jan 31 06:24:56 localhost dracut-cmdline[324]: Using kernel command line parameters:    BOOT_IMAGE=(hd0,msdos1)/boot/vmlinuz-5.14.0-665.el9.x86_64 root=UUID=822f14ea-6e7e-41df-b0d8-fbe282d9ded8 ro console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=1G-2G:192M,2G-64G:256M,64G-:512M
Jan 31 06:24:56 localhost systemd[1]: Finished dracut cmdline hook.
Jan 31 06:24:58 localhost systemd[1]: dracut-cmdline.service: Deactivated successfully.
Jan 31 06:24:58 localhost systemd[1]: Stopped dracut cmdline hook.

○ dracut-initqueue.service - dracut initqueue hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-initqueue.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:24:58 UTC; 3h 12min ago
   Duration: 765ms
       Docs: man:dracut-initqueue.service(8)
             man:dracut.bootup(7)
   Main PID: 500 (code=exited, status=0/SUCCESS)
        CPU: 21ms

Jan 31 06:24:57 localhost systemd[1]: Starting dracut initqueue hook...
Jan 31 06:24:57 localhost systemd[1]: Finished dracut initqueue hook.
Jan 31 06:24:58 localhost systemd[1]: dracut-initqueue.service: Deactivated successfully.
Jan 31 06:24:58 localhost systemd[1]: Stopped dracut initqueue hook.

○ dracut-mount.service - dracut mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-mount.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:24:58 UTC; 3h 12min ago
   Duration: 113ms
       Docs: man:dracut-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 568 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Jan 31 06:24:58 localhost systemd[1]: Starting dracut mount hook...
Jan 31 06:24:58 localhost systemd[1]: Finished dracut mount hook.
Jan 31 06:24:58 localhost systemd[1]: dracut-mount.service: Deactivated successfully.
Jan 31 06:24:58 localhost systemd[1]: Stopped dracut mount hook.

○ dracut-pre-mount.service - dracut pre-mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-mount.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:24:58 UTC; 3h 12min ago
   Duration: 727ms
       Docs: man:dracut-pre-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 546 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Jan 31 06:24:57 localhost systemd[1]: Starting dracut pre-mount hook...
Jan 31 06:24:57 localhost systemd[1]: Finished dracut pre-mount hook.
Jan 31 06:24:58 localhost systemd[1]: dracut-pre-mount.service: Deactivated successfully.
Jan 31 06:24:58 localhost systemd[1]: Stopped dracut pre-mount hook.

○ dracut-pre-pivot.service - dracut pre-pivot and cleanup hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-pivot.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:24:58 UTC; 3h 12min ago
   Duration: 32ms
       Docs: man:dracut-pre-pivot.service(8)
             man:dracut.bootup(7)
   Main PID: 573 (code=exited, status=0/SUCCESS)
        CPU: 56ms

Jan 31 06:24:58 localhost systemd[1]: Starting dracut pre-pivot and cleanup hook...
Jan 31 06:24:58 localhost systemd[1]: Finished dracut pre-pivot and cleanup hook.
Jan 31 06:24:58 localhost systemd[1]: dracut-pre-pivot.service: Deactivated successfully.
Jan 31 06:24:58 localhost systemd[1]: Stopped dracut pre-pivot and cleanup hook.

○ dracut-pre-trigger.service - dracut pre-trigger hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-trigger.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:24:58 UTC; 3h 12min ago
   Duration: 1.184s
       Docs: man:dracut-pre-trigger.service(8)
             man:dracut.bootup(7)
   Main PID: 465 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Jan 31 06:24:56 localhost systemd[1]: Starting dracut pre-trigger hook...
Jan 31 06:24:57 localhost systemd[1]: Finished dracut pre-trigger hook.
Jan 31 06:24:58 localhost systemd[1]: dracut-pre-trigger.service: Deactivated successfully.
Jan 31 06:24:58 localhost systemd[1]: Stopped dracut pre-trigger hook.

○ dracut-pre-udev.service - dracut pre-udev hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-udev.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:24:58 UTC; 3h 12min ago
   Duration: 1.244s
       Docs: man:dracut-pre-udev.service(8)
             man:dracut.bootup(7)
   Main PID: 413 (code=exited, status=0/SUCCESS)
        CPU: 195ms

Jan 31 06:24:56 localhost systemd[1]: Starting dracut pre-udev hook...
Jan 31 06:24:56 localhost rpc.statd[441]: Version 2.5.4 starting
Jan 31 06:24:56 localhost rpc.statd[441]: Initializing NSM state
Jan 31 06:24:56 localhost rpc.idmapd[446]: Setting log level to 0
Jan 31 06:24:56 localhost systemd[1]: Finished dracut pre-udev hook.
Jan 31 06:24:58 localhost rpc.idmapd[446]: exiting on signal 15
Jan 31 06:24:58 localhost systemd[1]: dracut-pre-udev.service: Deactivated successfully.
Jan 31 06:24:58 localhost systemd[1]: Stopped dracut pre-udev hook.

○ dracut-shutdown-onfailure.service - Service executing upon dracut-shutdown failure to perform cleanup
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown-onfailure.service; static)
     Active: inactive (dead)
       Docs: man:dracut-shutdown.service(8)

● dracut-shutdown.service - Restore /run/initramfs on shutdown
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown.service; static)
     Active: active (exited) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
       Docs: man:dracut-shutdown.service(8)
   Main PID: 811 (code=exited, status=0/SUCCESS)
        CPU: 2ms

● edpm-container-shutdown.service - EDPM Container Shutdown
     Loaded: loaded (/etc/systemd/system/edpm-container-shutdown.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2026-01-31 07:17:40 UTC; 2h 19min ago
       Docs: https://github.com/openstack-k8s-operators/docs
   Main PID: 61572 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Jan 31 07:17:40 compute-0 systemd[1]: Starting EDPM Container Shutdown...
Jan 31 07:17:40 compute-0 systemd[1]: Finished EDPM Container Shutdown.

○ edpm_libvirt_guests.service - Suspend libvirt Guests in edpm
     Loaded: loaded (/etc/systemd/system/edpm_libvirt_guests.service; enabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:libvirtd(8)
             https://libvirt.org

● edpm_nova_compute.service - nova_compute container
     Loaded: loaded (/etc/systemd/system/edpm_nova_compute.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:40:00 UTC; 1h 56min ago
    Process: 247384 ExecStart=/usr/bin/podman start nova_compute (code=exited, status=0/SUCCESS)
   Main PID: 247399 (conmon)
         IO: 0B read, 100.0K written
      Tasks: 1 (limit: 48560)
     Memory: 684.0K (peak: 16.5M)
        CPU: 3.062s
     CGroup: /system.slice/edpm_nova_compute.service
             └─247399 /usr/bin/conmon --api-version 1 -c 16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87 -u 16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87/userdata -p /run/containers/storage/overlay-containers/16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87

Jan 31 09:36:56 compute-0 nova_compute[247399]: 2026-01-31 09:36:56.128 247403 DEBUG oslo_service.periodic_task [None req-85be7847-7ccd-4892-a7aa-161de3959a02 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Jan 31 09:36:56 compute-0 nova_compute[247399]: 2026-01-31 09:36:56.129 247403 DEBUG nova.compute.manager [None req-85be7847-7ccd-4892-a7aa-161de3959a02 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477[00m
Jan 31 09:36:57 compute-0 nova_compute[247399]: 2026-01-31 09:36:57.393 247403 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Jan 31 09:36:58 compute-0 nova_compute[247399]: 2026-01-31 09:36:58.193 247403 DEBUG oslo_service.periodic_task [None req-85be7847-7ccd-4892-a7aa-161de3959a02 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Jan 31 09:36:58 compute-0 nova_compute[247399]: 2026-01-31 09:36:58.197 247403 DEBUG oslo_service.periodic_task [None req-85be7847-7ccd-4892-a7aa-161de3959a02 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Jan 31 09:36:58 compute-0 nova_compute[247399]: 2026-01-31 09:36:58.198 247403 DEBUG nova.compute.manager [None req-85be7847-7ccd-4892-a7aa-161de3959a02 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858[00m
Jan 31 09:36:58 compute-0 nova_compute[247399]: 2026-01-31 09:36:58.198 247403 DEBUG nova.compute.manager [None req-85be7847-7ccd-4892-a7aa-161de3959a02 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862[00m
Jan 31 09:36:58 compute-0 nova_compute[247399]: 2026-01-31 09:36:58.354 247403 DEBUG nova.compute.manager [None req-85be7847-7ccd-4892-a7aa-161de3959a02 - - - - - -] Didn't find any instances for network info cache update. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9944[00m
Jan 31 09:36:59 compute-0 nova_compute[247399]: 2026-01-31 09:36:59.197 247403 DEBUG oslo_service.periodic_task [None req-85be7847-7ccd-4892-a7aa-161de3959a02 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Jan 31 09:36:59 compute-0 nova_compute[247399]: 2026-01-31 09:36:59.198 247403 DEBUG oslo_service.periodic_task [None req-85be7847-7ccd-4892-a7aa-161de3959a02 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m

● edpm_ovn_controller.service - ovn_controller container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_controller.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:31:19 UTC; 2h 5min ago
   Main PID: 149152 (conmon)
         IO: 0B read, 120.0K written
      Tasks: 1 (limit: 48560)
     Memory: 696.0K (peak: 18.7M)
        CPU: 353ms
     CGroup: /system.slice/edpm_ovn_controller.service
             └─149152 /usr/bin/conmon --api-version 1 -c d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613 -u d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613/userdata -p /run/containers/storage/overlay-containers/d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613

Jan 31 09:16:50 compute-0 ovn_controller[149152]: 2026-01-31T09:16:50Z|00930|binding|INFO|Releasing lport 5bebd274-c8f9-4e5f-96fa-6c8eecac7fa3 from this chassis (sb_readonly=0)
Jan 31 09:16:54 compute-0 ovn_controller[149152]: 2026-01-31T09:16:54Z|00931|binding|INFO|Releasing lport 5bebd274-c8f9-4e5f-96fa-6c8eecac7fa3 from this chassis (sb_readonly=0)
Jan 31 09:17:05 compute-0 ovn_controller[149152]: 2026-01-31T09:17:05Z|00130|pinctrl(ovn_pinctrl0)|INFO|DHCPOFFER fa:16:3e:b8:fa:d2 10.100.0.12
Jan 31 09:17:05 compute-0 ovn_controller[149152]: 2026-01-31T09:17:05Z|00131|pinctrl(ovn_pinctrl0)|INFO|DHCPACK fa:16:3e:b8:fa:d2 10.100.0.12
Jan 31 09:17:24 compute-0 ovn_controller[149152]: 2026-01-31T09:17:24Z|00932|memory_trim|INFO|Detected inactivity (last active 30005 ms ago): trimming memory
Jan 31 09:18:16 compute-0 ovn_controller[149152]: 2026-01-31T09:18:16Z|00933|memory_trim|INFO|Detected inactivity (last active 30027 ms ago): trimming memory
Jan 31 09:18:50 compute-0 ovn_controller[149152]: 2026-01-31T09:18:50Z|00934|binding|INFO|Releasing lport 6c698385-414a-410d-8bd1-082bba741f94 from this chassis (sb_readonly=0)
Jan 31 09:18:50 compute-0 ovn_controller[149152]: 2026-01-31T09:18:50Z|00935|binding|INFO|Setting lport 6c698385-414a-410d-8bd1-082bba741f94 down in Southbound
Jan 31 09:18:50 compute-0 ovn_controller[149152]: 2026-01-31T09:18:50Z|00936|binding|INFO|Removing iface tap6c698385-41 ovn-installed in OVS
Jan 31 09:19:40 compute-0 ovn_controller[149152]: 2026-01-31T09:19:40Z|00937|memory_trim|INFO|Detected inactivity (last active 30018 ms ago): trimming memory

● edpm_ovn_metadata_agent.service - ovn_metadata_agent container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_metadata_agent.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:32:28 UTC; 2h 4min ago
   Main PID: 159729 (conmon)
         IO: 0B read, 96.5K written
      Tasks: 1 (limit: 48560)
     Memory: 712.0K (peak: 21.6M)
        CPU: 634ms
     CGroup: /system.slice/edpm_ovn_metadata_agent.service
             └─159729 /usr/bin/conmon --api-version 1 -c 1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52 -u 1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52/userdata -p /run/containers/storage/overlay-containers/1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52

Jan 31 09:33:30 compute-0 ovn_metadata_agent[159729]: 2026-01-31 09:33:30.580 159734 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Jan 31 09:34:30 compute-0 ovn_metadata_agent[159729]: 2026-01-31 09:34:30.580 159734 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Jan 31 09:34:30 compute-0 ovn_metadata_agent[159729]: 2026-01-31 09:34:30.580 159734 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Jan 31 09:34:30 compute-0 ovn_metadata_agent[159729]: 2026-01-31 09:34:30.581 159734 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Jan 31 09:35:30 compute-0 ovn_metadata_agent[159729]: 2026-01-31 09:35:30.581 159734 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Unit fcoe.service could not be found.
Unit hv_kvp_daemon.service could not be found.
Jan 31 09:35:30 compute-0 ovn_metadata_agent[159729]: 2026-01-31 09:35:30.582 159734 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Jan 31 09:35:30 compute-0 ovn_metadata_agent[159729]: 2026-01-31 09:35:30.582 159734 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Jan 31 09:36:30 compute-0 ovn_metadata_agent[159729]: 2026-01-31 09:36:30.583 159734 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Jan 31 09:36:30 compute-0 ovn_metadata_agent[159729]: 2026-01-31 09:36:30.583 159734 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Jan 31 09:36:30 compute-0 ovn_metadata_agent[159729]: 2026-01-31 09:36:30.584 159734 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m

○ emergency.service - Emergency Shell
     Loaded: loaded (/usr/lib/systemd/system/emergency.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

● getty@tty1.service - Getty on tty1
     Loaded: loaded (/usr/lib/systemd/system/getty@.service; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 06:25:03 UTC; 3h 11min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1003 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 228.0K (peak: 560.0K)
        CPU: 7ms
     CGroup: /system.slice/system-getty.slice/getty@tty1.service
             └─1003 /sbin/agetty -o "-p -- \\u" --noclear - linux

● gssproxy.service - GSSAPI Proxy Daemon
     Loaded: loaded (/usr/lib/systemd/system/gssproxy.service; disabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 06:25:01 UTC; 3h 11min ago
   Main PID: 871 (gssproxy)
         IO: 0B read, 0B written
      Tasks: 6 (limit: 48560)
     Memory: 1.8M (peak: 3.5M)
        CPU: 22ms
     CGroup: /system.slice/gssproxy.service
             └─871 /usr/sbin/gssproxy -D

○ import-state.service - Import network configuration from initramfs
     Loaded: loaded (/usr/lib/systemd/system/import-state.service; enabled; preset: enabled)
     Active: inactive (dead)

○ initrd-cleanup.service - Cleaning Up and Shutting Down Daemons
     Loaded: loaded (/usr/lib/systemd/system/initrd-cleanup.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:24:58 UTC; 3h 12min ago
   Main PID: 614 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Jan 31 06:24:58 localhost systemd[1]: Starting Cleaning Up and Shutting Down Daemons...
Jan 31 06:24:58 localhost systemd[1]: initrd-cleanup.service: Deactivated successfully.
Jan 31 06:24:58 localhost systemd[1]: Finished Cleaning Up and Shutting Down Daemons.

○ initrd-parse-etc.service - Mountpoints Configured in the Real Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-parse-etc.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:24:58 UTC; 3h 12min ago
   Main PID: 567 (code=exited, status=0/SUCCESS)
        CPU: 11ms

Jan 31 06:24:58 localhost systemd[1]: Starting Mountpoints Configured in the Real Root...
Jan 31 06:24:58 localhost systemd[1]: initrd-parse-etc.service: Deactivated successfully.
Jan 31 06:24:58 localhost systemd[1]: Finished Mountpoints Configured in the Real Root.

○ initrd-switch-root.service - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
   Main PID: 618 (code=exited, status=0/SUCCESS)
        CPU: 4ms

Jan 31 06:24:58 localhost systemd[1]: Starting Switch Root...

○ initrd-udevadm-cleanup-db.service - Cleanup udev Database
     Loaded: loaded (/usr/lib/systemd/system/initrd-udevadm-cleanup-db.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:24:58 UTC; 3h 12min ago
   Main PID: 616 (code=exited, status=0/SUCCESS)
        CPU: 5ms

Jan 31 06:24:58 localhost systemd[1]: Starting Cleanup udev Database...
Jan 31 06:24:58 localhost systemd[1]: initrd-udevadm-cleanup-db.service: Deactivated successfully.
Jan 31 06:24:58 localhost systemd[1]: Finished Cleanup udev Database.

○ ip6tables.service - IPv6 firewall with ip6tables
     Loaded: loaded (/usr/lib/systemd/system/ip6tables.service; disabled; preset: disabled)
     Active: inactive (dead)

○ iptables.service - IPv4 firewall with iptables
     Loaded: loaded (/usr/lib/systemd/system/iptables.service; disabled; preset: disabled)
     Active: inactive (dead) since Sat 2026-01-31 07:17:49 UTC; 2h 19min ago
   Duration: 52min 49.103s
   Main PID: 812 (code=exited, status=0/SUCCESS)
        CPU: 90ms

Jan 31 07:17:49 compute-0 systemd[1]: Stopping IPv4 firewall with iptables...
Jan 31 07:17:49 compute-0 iptables.init[62823]: iptables: Setting chains to policy ACCEPT: raw mangle filter nat [  OK  ]
Jan 31 07:17:49 compute-0 iptables.init[62823]: iptables: Flushing firewall rules: [  OK  ]
Jan 31 07:17:49 compute-0 systemd[1]: iptables.service: Deactivated successfully.
Jan 31 07:17:49 compute-0 systemd[1]: Stopped IPv4 firewall with iptables.

● irqbalance.service - irqbalance daemon
     Loaded: loaded (/usr/lib/systemd/system/irqbalance.service; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
       Docs: man:irqbalance(1)
             https://github.com/Irqbalance/irqbalance
   Main PID: 813 (irqbalance)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48560)
     Memory: 1.1M (peak: 1.6M)
        CPU: 662ms
     CGroup: /system.slice/irqbalance.service
             └─813 /usr/sbin/irqbalance

○ iscsi-init.service - One time configuration for iscsi.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-init.service; disabled; preset: disabled)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 07:38:00 UTC; 1h 58min ago

Jan 31 07:37:18 compute-0 systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).
Jan 31 07:38:00 compute-0 systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).

○ iscsi-onboot.service - Special handling of early boot iSCSI sessions
     Loaded: loaded (/usr/lib/systemd/system/iscsi-onboot.service; enabled; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsi-shutdown.service - Logout off all iSCSI sessions on shutdown
     Loaded: loaded (/usr/lib/systemd/system/iscsi-shutdown.service; static)
     Active: active (exited) since Sat 2026-01-31 07:37:18 UTC; 1h 59min ago
       Docs: man:iscsid(8)
             man:iscsiadm(8)
   Main PID: 223440 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Jan 31 07:37:18 compute-0 systemd[1]: Starting Logout off all iSCSI sessions on shutdown...
Jan 31 07:37:18 compute-0 systemd[1]: Finished Logout off all iSCSI sessions on shutdown.

○ iscsi-starter.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-starter.service; enabled; preset: disabled)
     Active: inactive (dead)
Unit lvm2-activation-early.service could not be found.

○ iscsi.service - Login and scanning of iSCSI devices
     Loaded: loaded (/usr/lib/systemd/system/iscsi.service; indirect; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsid.service - Open-iSCSI
     Loaded: loaded (/usr/lib/systemd/system/iscsid.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:38:00 UTC; 1h 58min ago
TriggeredBy: ● iscsid.socket
       Docs: man:iscsid(8)
             man:iscsiuio(8)
             man:iscsiadm(8)
   Main PID: 230940 (iscsid)
     Status: "Ready to process requests"
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 1.9M (peak: 2.0M)
        CPU: 8ms
     CGroup: /system.slice/iscsid.service
             └─230940 /usr/sbin/iscsid -f

Jan 31 07:38:00 compute-0 systemd[1]: Starting Open-iSCSI...
Jan 31 07:38:00 compute-0 systemd[1]: Started Open-iSCSI.

○ iscsiuio.service - iSCSI UserSpace I/O driver
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ iscsiuio.socket
       Docs: man:iscsiuio(8)

● kdump.service - Crash recovery kernel arming
     Loaded: loaded (/usr/lib/systemd/system/kdump.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2026-01-31 06:25:12 UTC; 3h 11min ago
   Main PID: 1001 (code=exited, status=0/SUCCESS)
        CPU: 14.436s

● kmod-static-nodes.service - Create List of Static Device Nodes
     Loaded: loaded (/usr/lib/systemd/system/kmod-static-nodes.service; static)
     Active: active (exited) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
   Main PID: 668 (code=exited, status=0/SUCCESS)
        CPU: 3ms

● ldconfig.service - Rebuild Dynamic Linker Cache
     Loaded: loaded (/usr/lib/systemd/system/ldconfig.service; static)
     Active: active (exited) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
       Docs: man:ldconfig(8)
   Main PID: 690 (code=exited, status=0/SUCCESS)
        CPU: 43ms

○ libvirtd.service
     Loaded: masked (Reason: Unit libvirtd.service is masked.)
     Active: inactive (dead)
TriggeredBy: ○ libvirtd.socket
             ○ libvirtd-admin.socket
             ○ libvirtd-ro.socket

○ loadmodules.service - Load legacy module configuration
     Loaded: loaded (/usr/lib/systemd/system/loadmodules.service; enabled; preset: enabled)
     Active: inactive (dead)

○ logrotate.service - Rotate log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.service; static)
     Active: inactive (dead)
TriggeredBy: ● logrotate.timer
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

○ lvm2-lvmpolld.service - LVM2 poll daemon
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.service; static)
     Active: inactive (dead)
TriggeredBy: ● lvm2-lvmpolld.socket
       Docs: man:lvmpolld(8)

● lvm2-monitor.service - Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling
     Loaded: loaded (/usr/lib/systemd/system/lvm2-monitor.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2026-01-31 07:12:17 UTC; 2h 24min ago
       Docs: man:dmeventd(8)
             man:lvcreate(8)
             man:lvchange(8)
             man:vgchange(8)
   Main PID: 34058 (code=exited, status=0/SUCCESS)
        CPU: 14ms

○ mdmonitor.service - Software RAID monitoring and management
     Loaded: loaded (/usr/lib/systemd/system/mdmonitor.service; enabled; preset: enabled)
     Active: inactive (dead)

○ microcode.service - Load CPU microcode update
     Loaded: loaded (/usr/lib/systemd/system/microcode.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:25:00 UTC; 3h 11min ago

○ modprobe@configfs.service - Load Kernel Module configfs
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
       Docs: man:modprobe(8)
   Main PID: 735 (code=exited, status=0/SUCCESS)
        CPU: 4ms

○ modprobe@drm.service - Load Kernel Module drm
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:modprobe(8)
   Main PID: 670 (code=exited, status=0/SUCCESS)
        CPU: 77ms

○ modprobe@efi_pstore.service - Load Kernel Module efi_pstore
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:modprobe(8)
   Main PID: 671 (code=exited, status=0/SUCCESS)
        CPU: 8ms

○ modprobe@fuse.service - Load Kernel Module fuse
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:modprobe(8)
   Main PID: 672 (code=exited, status=0/SUCCESS)
        CPU: 20ms

● multipathd.service - Device-Mapper Multipath Device Controller
     Loaded: loaded (/usr/lib/systemd/system/multipathd.service; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 07:38:01 UTC; 1h 58min ago
TriggeredBy: ● multipathd.socket
   Main PID: 231100 (multipathd)
     Status: "up"
         IO: 0B read, 0B written
      Tasks: 7
     Memory: 18.4M (peak: 18.7M)
        CPU: 776ms
     CGroup: /system.slice/multipathd.service
             └─231100 /sbin/multipathd -d -s

Jan 31 07:38:01 compute-0 systemd[1]: Starting Device-Mapper Multipath Device Controller...
Jan 31 07:38:01 compute-0 multipathd[231100]: --------start up--------
Jan 31 07:38:01 compute-0 multipathd[231100]: read /etc/multipath.conf
Jan 31 07:38:01 compute-0 multipathd[231100]: path checkers start up
Jan 31 07:38:01 compute-0 systemd[1]: Started Device-Mapper Multipath Device Controller.

○ netns-placeholder.service - Create netns directory
     Loaded: loaded (/etc/systemd/system/netns-placeholder.service; enabled; preset: enabled)
     Active: inactive (dead) since Sat 2026-01-31 07:32:03 UTC; 2h 4min ago
   Main PID: 156755 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Jan 31 07:32:03 compute-0 systemd[1]: Starting Create netns directory...
Jan 31 07:32:03 compute-0 systemd[1]: netns-placeholder.service: Deactivated successfully.
Jan 31 07:32:03 compute-0 systemd[1]: Finished Create netns directory.

○ network.service - LSB: Bring up/down networking
     Loaded: loaded (/etc/rc.d/init.d/network; generated)
     Active: inactive (dead)
       Docs: man:systemd-sysv-generator(8)

● NetworkManager-wait-online.service - Network Manager Wait Online
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager-wait-online.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2026-01-31 07:15:33 UTC; 2h 21min ago
       Docs: man:NetworkManager-wait-online.service(8)
   Main PID: 49031 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Jan 31 07:15:33 compute-0 systemd[1]: Starting Network Manager Wait Online...
Jan 31 07:15:33 compute-0 systemd[1]: Finished Network Manager Wait Online.

● NetworkManager.service - Network Manager
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager.service; enabled; preset: enabled)
    Drop-In: /usr/lib/systemd/system/NetworkManager.service.d
             └─NetworkManager-ovs.conf
     Active: active (running) since Sat 2026-01-31 07:15:33 UTC; 2h 21min ago
       Docs: man:NetworkManager(8)
   Main PID: 49013 (NetworkManager)
         IO: 104.0K read, 327.0K written
      Tasks: 3 (limit: 48560)
     Memory: 6.0M (peak: 7.8M)
        CPU: 1min 16.325s
     CGroup: /system.slice/NetworkManager.service
             └─49013 /usr/sbin/NetworkManager --no-daemon

Jan 31 09:16:48 compute-0 NetworkManager[49013]: <info>  [1769851008.8324] manager: (tap6c698385-41): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/431)
Jan 31 09:16:49 compute-0 NetworkManager[49013]: <info>  [1769851009.8938] manager: (tap6c698385-41): new Tun device (/org/freedesktop/NetworkManager/Devices/432)
Unit ntpd.service could not be found.
Unit ntpdate.service could not be found.
Jan 31 09:16:49 compute-0 NetworkManager[49013]: <info>  [1769851009.9295] device (tap6c698385-41): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Jan 31 09:16:49 compute-0 NetworkManager[49013]: <info>  [1769851009.9303] device (tap6c698385-41): state change: unavailable -> disconnected (reason 'none', managed-type: 'external')
Jan 31 09:16:49 compute-0 NetworkManager[49013]: <info>  [1769851009.9951] manager: (tap3f3cc872-50): new Veth device (/org/freedesktop/NetworkManager/Devices/433)
Jan 31 09:16:50 compute-0 NetworkManager[49013]: <info>  [1769851010.0400] device (tap3f3cc872-50): carrier: link connected
Jan 31 09:16:50 compute-0 NetworkManager[49013]: <info>  [1769851010.1470] manager: (tap3f3cc872-50): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/434)
Jan 31 09:16:54 compute-0 NetworkManager[49013]: <info>  [1769851014.6441] manager: (patch-provnet-9633882b-fa09-4c13-9ab8-69ba69661845-to-br-int): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/435)
Jan 31 09:16:54 compute-0 NetworkManager[49013]: <info>  [1769851014.6450] manager: (patch-br-int-to-provnet-9633882b-fa09-4c13-9ab8-69ba69661845): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/436)
Jan 31 09:18:50 compute-0 NetworkManager[49013]: <info>  [1769851130.4843] device (tap6c698385-41): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')

○ nfs-idmapd.service - NFSv4 ID-name mapping service
     Loaded: loaded (/usr/lib/systemd/system/nfs-idmapd.service; static)
     Active: inactive (dead)
       Docs: man:idmapd(8)

○ nfs-mountd.service - NFS Mount Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfs-mountd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.mountd(8)

○ nfs-server.service - NFS server and services
     Loaded: loaded (/usr/lib/systemd/system/nfs-server.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:rpc.nfsd(8)
             man:exportfs(8)

○ nfs-utils.service - NFS server and client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-utils.service; static)
     Active: inactive (dead)

○ nfsdcld.service - NFSv4 Client Tracking Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfsdcld.service; static)
     Active: inactive (dead)
       Docs: man:nfsdcld(8)

● nftables.service - Netfilter Tables
     Loaded: loaded (/usr/lib/systemd/system/nftables.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2026-01-31 07:17:51 UTC; 2h 19min ago
       Docs: man:nft(8)
   Main PID: 63215 (code=exited, status=0/SUCCESS)
        CPU: 23ms

Jan 31 07:17:51 compute-0 systemd[1]: Starting Netfilter Tables...
Jan 31 07:17:51 compute-0 systemd[1]: Finished Netfilter Tables.

● nis-domainname.service - Read and set NIS domainname from /etc/sysconfig/network
     Loaded: loaded (/usr/lib/systemd/system/nis-domainname.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
   Main PID: 673 (code=exited, status=0/SUCCESS)
        CPU: 2ms

○ nvmefc-boot-connections.service - Auto-connect to subsystems on FC-NVME devices found during boot
     Loaded: loaded (/usr/lib/systemd/system/nvmefc-boot-connections.service; enabled; preset: enabled)
     Active: inactive (dead)

● openvswitch.service - Open vSwitch
     Loaded: loaded (/usr/lib/systemd/system/openvswitch.service; enabled; preset: disabled)
     Active: active (exited) since Sat 2026-01-31 07:15:16 UTC; 2h 21min ago
   Main PID: 47322 (code=exited, status=0/SUCCESS)
        CPU: 1ms

Jan 31 07:15:16 compute-0 systemd[1]: Starting Open vSwitch...
Jan 31 07:15:16 compute-0 systemd[1]: Finished Open vSwitch.

● ovs-delete-transient-ports.service - Open vSwitch Delete Transient Ports
     Loaded: loaded (/usr/lib/systemd/system/ovs-delete-transient-ports.service; static)
     Active: active (exited) since Sat 2026-01-31 07:15:15 UTC; 2h 21min ago
   Main PID: 47259 (code=exited, status=0/SUCCESS)
        CPU: 19ms

Unit pacemaker.service could not be found.
Unit plymouth-quit-wait.service could not be found.
Unit plymouth-start.service could not be found.
Jan 31 07:15:15 compute-0 systemd[1]: Starting Open vSwitch Delete Transient Ports...
Jan 31 07:15:15 compute-0 systemd[1]: Finished Open vSwitch Delete Transient Ports.

● ovs-vswitchd.service - Open vSwitch Forwarding Unit
     Loaded: loaded (/usr/lib/systemd/system/ovs-vswitchd.service; static)
     Active: active (running) since Sat 2026-01-31 07:15:16 UTC; 2h 21min ago
   Main PID: 47313 (ovs-vswitchd)
         IO: 3.4M read, 1.5M written
      Tasks: 13 (limit: 48560)
     Memory: 247.5M (peak: 250.4M)
        CPU: 35.999s
     CGroup: /system.slice/ovs-vswitchd.service
             └─47313 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach

Jan 31 07:15:15 compute-0 systemd[1]: Starting Open vSwitch Forwarding Unit...
Jan 31 07:15:16 compute-0 ovs-ctl[47303]: Inserting openvswitch module [  OK  ]
Jan 31 07:15:16 compute-0 ovs-ctl[47272]: Starting ovs-vswitchd [  OK  ]
Jan 31 07:15:16 compute-0 ovs-ctl[47272]: Enabling remote OVSDB managers [  OK  ]
Jan 31 07:15:16 compute-0 ovs-vsctl[47320]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0
Jan 31 07:15:16 compute-0 systemd[1]: Started Open vSwitch Forwarding Unit.

● ovsdb-server.service - Open vSwitch Database Unit
     Loaded: loaded (/usr/lib/systemd/system/ovsdb-server.service; static)
     Active: active (running) since Sat 2026-01-31 07:15:15 UTC; 2h 21min ago
   Main PID: 47231 (ovsdb-server)
         IO: 1.2M read, 2.3M written
      Tasks: 1 (limit: 48560)
     Memory: 5.6M (peak: 38.7M)
        CPU: 31.314s
     CGroup: /system.slice/ovsdb-server.service
             └─47231 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach

Jan 31 07:15:15 compute-0 chown[47177]: /usr/bin/chown: cannot access '/run/openvswitch': No such file or directory
Jan 31 07:15:15 compute-0 ovs-ctl[47182]: /etc/openvswitch/conf.db does not exist ... (warning).
Jan 31 07:15:15 compute-0 ovs-ctl[47182]: Creating empty database /etc/openvswitch/conf.db [  OK  ]
Jan 31 07:15:15 compute-0 ovs-ctl[47182]: Starting ovsdb-server [  OK  ]
Jan 31 07:15:15 compute-0 ovs-vsctl[47232]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait -- init -- set Open_vSwitch . db-version=8.5.1
Jan 31 07:15:15 compute-0 ovs-vsctl[47248]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait set Open_vSwitch . ovs-version=3.3.5-115.el9s "external-ids:system-id=\"59a8b96c-18d5-4426-968c-99837b56953c\"" "external-ids:rundir=\"/var/run/openvswitch\"" "system-type=\"centos\"" "system-version=\"9\""
Jan 31 07:15:15 compute-0 ovs-ctl[47182]: Configuring Open vSwitch system IDs [  OK  ]
Jan 31 07:15:15 compute-0 ovs-ctl[47182]: Enabling remote OVSDB managers [  OK  ]
Jan 31 07:15:15 compute-0 systemd[1]: Started Open vSwitch Database Unit.
Jan 31 07:15:15 compute-0 ovs-vsctl[47258]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0

● polkit.service - Authorization Manager
     Loaded: loaded (/usr/lib/systemd/system/polkit.service; static)
     Active: active (running) since Sat 2026-01-31 07:14:28 UTC; 2h 22min ago
       Docs: man:polkit(8)
   Main PID: 43497 (polkitd)
         IO: 19.2M read, 0B written
      Tasks: 12 (limit: 48560)
     Memory: 25.0M (peak: 26.5M)
        CPU: 3.506s
     CGroup: /system.slice/polkit.service
             └─43497 /usr/lib/polkit-1/polkitd --no-debug

Jan 31 07:34:35 compute-0 polkitd[43497]: Collecting garbage unconditionally...
Jan 31 07:34:35 compute-0 polkitd[43497]: Loading rules from directory /etc/polkit-1/rules.d
Unit power-profiles-daemon.service could not be found.
Unit rpc-svcgssd.service could not be found.
Jan 31 07:34:35 compute-0 polkitd[43497]: Loading rules from directory /usr/share/polkit-1/rules.d
Jan 31 07:34:35 compute-0 polkitd[43497]: Finished loading, compiling and executing 3 rules
Jan 31 07:36:23 compute-0 polkitd[43497]: Registered Authentication Agent for unix-process:214774:428954 (system bus name :1.2884 [pkttyagent --process 214774 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Jan 31 07:36:23 compute-0 polkitd[43497]: Unregistered Authentication Agent for unix-process:214774:428954 (system bus name :1.2884, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Jan 31 07:36:23 compute-0 polkitd[43497]: Registered Authentication Agent for unix-process:214773:428953 (system bus name :1.2885 [pkttyagent --process 214773 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Jan 31 07:36:23 compute-0 polkitd[43497]: Unregistered Authentication Agent for unix-process:214773:428953 (system bus name :1.2885, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Jan 31 07:36:26 compute-0 polkitd[43497]: Registered Authentication Agent for unix-process:215417:429199 (system bus name :1.2896 [pkttyagent --process 215417 --notify-fd 5 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Jan 31 07:36:26 compute-0 polkitd[43497]: Unregistered Authentication Agent for unix-process:215417:429199 (system bus name :1.2896, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)

○ raid-check.service - RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.service; static)
     Active: inactive (dead)
TriggeredBy: ○ raid-check.timer

○ rbdmap.service - Map RBD devices
     Loaded: loaded (/usr/lib/systemd/system/rbdmap.service; disabled; preset: disabled)
     Active: inactive (dead)

○ rc-local.service - /etc/rc.d/rc.local Compatibility
     Loaded: loaded (/usr/lib/systemd/system/rc-local.service; static)
     Active: inactive (dead)
       Docs: man:systemd-rc-local-generator(8)

○ rescue.service - Rescue Shell
     Loaded: loaded (/usr/lib/systemd/system/rescue.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

○ rpc-gssd.service - RPC security service for NFS client and server
     Loaded: loaded (/usr/lib/systemd/system/rpc-gssd.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:25:01 UTC; 3h 11min ago
       Docs: man:rpc.gssd(8)

● rpc-statd-notify.service - Notify NFS peers of a restart
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd-notify.service; static)
     Active: active (exited) since Sat 2026-01-31 06:25:03 UTC; 3h 11min ago
       Docs: man:sm-notify(8)
             man:rpc.statd(8)
        CPU: 6ms

○ rpc-statd.service - NFS status monitor for NFSv2/3 locking.
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.statd(8)

● rpcbind.service - RPC Bind
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.service; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
TriggeredBy: ● rpcbind.socket
       Docs: man:rpcbind(8)
   Main PID: 695 (rpcbind)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 2.5M (peak: 2.8M)
        CPU: 42ms
     CGroup: /system.slice/rpcbind.service
             └─695 /usr/bin/rpcbind -w -f

● rsyslog.service - System Logging Service
     Loaded: loaded (/usr/lib/systemd/system/rsyslog.service; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 06:25:03 UTC; 3h 11min ago
       Docs: man:rsyslogd(8)
             https://www.rsyslog.com/doc/
   Main PID: 998 (rsyslogd)
         IO: 0B read, 49.4M written
      Tasks: 3 (limit: 48560)
     Memory: 47.1M (peak: 47.6M)
        CPU: 23.712s
Unit sntp.service could not be found.
Unit sshd-keygen.service could not be found.
     CGroup: /system.slice/rsyslog.service
             └─998 /usr/sbin/rsyslogd -n

Jan 31 08:46:44 compute-0 rsyslogd[998]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 31 08:56:06 compute-0 rsyslogd[998]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 31 08:56:06 compute-0 rsyslogd[998]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 31 09:05:06 compute-0 rsyslogd[998]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 31 09:05:06 compute-0 rsyslogd[998]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 31 09:17:32 compute-0 rsyslogd[998]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 31 09:17:32 compute-0 rsyslogd[998]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 31 09:20:32 compute-0 rsyslogd[998]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 31 09:20:32 compute-0 rsyslogd[998]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 31 09:36:35 compute-0 rsyslogd[998]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]

○ selinux-autorelabel-mark.service - Mark the need to relabel after reboot
     Loaded: loaded (/usr/lib/systemd/system/selinux-autorelabel-mark.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:24:59 UTC; 3h 12min ago

● serial-getty@ttyS0.service - Serial Getty on ttyS0
     Loaded: loaded (/usr/lib/systemd/system/serial-getty@.service; enabled-runtime; preset: disabled)
     Active: active (running) since Sat 2026-01-31 06:25:03 UTC; 3h 11min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1004 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 280.0K (peak: 524.0K)
        CPU: 5ms
     CGroup: /system.slice/system-serial\x2dgetty.slice/serial-getty@ttyS0.service
             └─1004 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

○ sshd-keygen@ecdsa.service - OpenSSH ecdsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 07:34:39 UTC; 2h 2min ago

Jan 31 07:34:39 compute-0 systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@ed25519.service - OpenSSH ed25519 Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 07:34:39 UTC; 2h 2min ago

Jan 31 07:34:39 compute-0 systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@rsa.service - OpenSSH rsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 07:34:39 UTC; 2h 2min ago
Unit syslog.service could not be found.

Jan 31 07:34:39 compute-0 systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

● sshd.service - OpenSSH server daemon
     Loaded: loaded (/usr/lib/systemd/system/sshd.service; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 07:34:39 UTC; 2h 2min ago
       Docs: man:sshd(8)
             man:sshd_config(5)
   Main PID: 186742 (sshd)
         IO: 688.0K read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 4.1M (peak: 7.1M)
        CPU: 1.329s
     CGroup: /system.slice/sshd.service
             └─186742 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"

Jan 31 09:21:33 compute-0 sshd-session[424534]: pam_unix(sshd:session): session closed for user zuul
Jan 31 09:21:33 compute-0 sshd-session[424563]: Accepted publickey for zuul from 192.168.122.10 port 39860 ssh2: ECDSA SHA256:apQ7PrWRnJH/9O50FLg797xnKDb8AnpQrFOQIPxUTXM
Jan 31 09:21:33 compute-0 sshd-session[424563]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Jan 31 09:21:33 compute-0 sshd-session[424563]: pam_unix(sshd:session): session closed for user zuul
Jan 31 09:21:48 compute-0 sshd-session[424694]: error: kex_exchange_identification: read: Connection reset by peer
Jan 31 09:21:48 compute-0 sshd-session[424694]: Connection reset by 176.120.22.52 port 9446
Jan 31 09:29:31 compute-0 sshd-session[434802]: Received disconnect from 103.42.57.158 port 46102:11:  [preauth]
Jan 31 09:29:31 compute-0 sshd-session[434802]: Disconnected from authenticating user root 103.42.57.158 port 46102 [preauth]
Jan 31 09:36:23 compute-0 sshd-session[442577]: Accepted publickey for zuul from 192.168.122.10 port 41980 ssh2: ECDSA SHA256:apQ7PrWRnJH/9O50FLg797xnKDb8AnpQrFOQIPxUTXM
Jan 31 09:36:23 compute-0 sshd-session[442577]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)

○ sssd-kcm.service - SSSD Kerberos Cache Manager
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.service; indirect; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ● sssd-kcm.socket
       Docs: man:sssd-kcm(5)

○ sssd.service - System Security Services Daemon
     Loaded: loaded (/usr/lib/systemd/system/sssd.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:25:00 UTC; 3h 11min ago

○ sysstat-collect.service - system activity accounting tool
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-collect.timer
       Docs: man:sa1(8)

○ sysstat-summary.service - Generate a daily summary of process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-summary.timer
       Docs: man:sa2(8)

○ sysstat.service - Resets System Activity Logs
     Loaded: loaded (/usr/lib/systemd/system/sysstat.service; enabled; preset: enabled)
     Active: inactive (dead)

○ systemd-ask-password-console.service - Dispatch Password Requests to Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-console.path
       Docs: man:systemd-ask-password-console.service(8)

○ systemd-ask-password-wall.service - Forward Password Requests to Wall
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-wall.path
       Docs: man:systemd-ask-password-wall.service(8)

○ systemd-binfmt.service - Set Up Additional Binary Formats
     Loaded: loaded (/usr/lib/systemd/system/systemd-binfmt.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd-binfmt.service(8)
             man:binfmt.d(5)
             https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

○ systemd-boot-random-seed.service - Update Boot Loader Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-random-seed.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd-boot-random-seed.service(8)
             man:random(4)

● systemd-boot-update.service - Automatic Boot Loader Update
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-update.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:bootctl(1)
   Main PID: 691 (code=exited, status=0/SUCCESS)
        CPU: 7ms

○ systemd-firstboot.service - First Boot Wizard
     Loaded: loaded (/usr/lib/systemd/system/systemd-firstboot.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd-firstboot(1)

○ systemd-fsck-root.service - File System Check on Root Device
     Loaded: loaded (/usr/lib/systemd/system/systemd-fsck-root.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
   Duration: 1.745s
       Docs: man:systemd-fsck-root.service(8)
   Main PID: 551 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Jan 31 06:24:57 localhost systemd[1]: Starting File System Check on /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8...
Jan 31 06:24:57 localhost systemd-fsck[553]: /usr/sbin/fsck.xfs: XFS file system.
Jan 31 06:24:57 localhost systemd[1]: Finished File System Check on /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8.

● systemd-hostnamed.service - Hostname Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-hostnamed.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-hostnamed.service.d
             └─disable-privatedevices.conf
     Active: active (running) since Sat 2026-01-31 09:36:39 UTC; 20s ago
       Docs: man:systemd-hostnamed.service(8)
             man:hostname(5)
             man:machine-info(5)
             man:org.freedesktop.resolve1(5)
   Main PID: 444949 (systemd-hostnam)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 2.7M (peak: 3.8M)
        CPU: 78ms
     CGroup: /system.slice/systemd-hostnamed.service
             └─444949 /usr/lib/systemd/systemd-hostnamed

Jan 31 09:36:39 compute-0 systemd[1]: Starting Hostname Service...
Jan 31 09:36:39 compute-0 systemd[1]: Started Hostname Service.

● systemd-hwdb-update.service - Rebuild Hardware Database
     Loaded: loaded (/usr/lib/systemd/system/systemd-hwdb-update.service; static)
     Active: active (exited) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:hwdb(7)
             man:systemd-hwdb(8)
   Main PID: 683 (code=exited, status=0/SUCCESS)
        CPU: 528ms

○ systemd-initctl.service - initctl Compatibility Daemon
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-initctl.socket
       Docs: man:systemd-initctl.service(8)

● systemd-journal-catalog-update.service - Rebuild Journal Catalog
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-catalog-update.service; static)
     Active: active (exited) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 696 (code=exited, status=0/SUCCESS)
        CPU: 13ms

● systemd-journal-flush.service - Flush Journal to Persistent Storage
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-flush.service; static)
     Active: active (exited) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 684 (code=exited, status=0/SUCCESS)
        CPU: 6ms

● systemd-journald.service - Journal Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.service; static)
     Active: active (running) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
TriggeredBy: ● systemd-journald-dev-log.socket
             ● systemd-journald.socket
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 674 (systemd-journal)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 149.9M (peak: 159.4M)
        CPU: 30.589s
     CGroup: /system.slice/systemd-journald.service
             └─674 /usr/lib/systemd/systemd-journald

● systemd-logind.service - User Login Management
     Loaded: loaded (/usr/lib/systemd/system/systemd-logind.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-logind.service.d
             └─10-grub2-logind-service.conf
     Active: active (running) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
       Docs: man:sd-login(3)
             man:systemd-logind.service(8)
             man:logind.conf(5)
             man:org.freedesktop.login1(5)
   Main PID: 818 (systemd-logind)
     Status: "Processing requests..."
         IO: 4.0K read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 7.2M (peak: 9.2M)
        CPU: 4.844s
     CGroup: /system.slice/systemd-logind.service
             └─818 /usr/lib/systemd/systemd-logind

Jan 31 09:20:07 compute-0 systemd-logind[818]: New session 75 of user zuul.
Jan 31 09:21:32 compute-0 systemd-logind[818]: Session 75 logged out. Waiting for processes to exit.
Jan 31 09:21:32 compute-0 systemd-logind[818]: Removed session 75.
Jan 31 09:21:32 compute-0 systemd-logind[818]: New session 76 of user zuul.
Jan 31 09:21:33 compute-0 systemd-logind[818]: Session 76 logged out. Waiting for processes to exit.
Jan 31 09:21:33 compute-0 systemd-logind[818]: Removed session 76.
Jan 31 09:21:33 compute-0 systemd-logind[818]: New session 77 of user zuul.
Jan 31 09:21:33 compute-0 systemd-logind[818]: Session 77 logged out. Waiting for processes to exit.
Jan 31 09:21:33 compute-0 systemd-logind[818]: Removed session 77.
Jan 31 09:36:23 compute-0 systemd-logind[818]: New session 78 of user zuul.

○ systemd-machine-id-commit.service - Commit a transient machine-id on disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-machine-id-commit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd-machine-id-commit.service(8)

● systemd-machined.service - Virtual Machine and Container Registration Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-machined.service; static)
     Active: active (running) since Sat 2026-01-31 07:36:17 UTC; 2h 0min ago
       Docs: man:systemd-machined.service(8)
             man:org.freedesktop.machine1(5)
   Main PID: 212769 (systemd-machine)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 1.5M (peak: 2.3M)
        CPU: 4.053s
     CGroup: /system.slice/systemd-machined.service
             └─212769 /usr/lib/systemd/systemd-machined

Jan 31 09:08:24 compute-0 systemd-machined[212769]: Machine qemu-101-instance-000000d2 terminated.
Jan 31 09:08:39 compute-0 systemd-machined[212769]: Machine qemu-102-instance-000000d4 terminated.
Jan 31 09:10:40 compute-0 systemd-machined[212769]: New machine qemu-103-instance-000000d9.
Jan 31 09:11:49 compute-0 systemd-machined[212769]: Machine qemu-103-instance-000000d9 terminated.
Jan 31 09:13:01 compute-0 systemd-machined[212769]: New machine qemu-104-instance-000000dd.
Jan 31 09:13:25 compute-0 systemd-machined[212769]: Machine qemu-104-instance-000000dd terminated.
Jan 31 09:15:29 compute-0 systemd-machined[212769]: New machine qemu-105-instance-000000de.
Jan 31 09:15:54 compute-0 systemd-machined[212769]: Machine qemu-105-instance-000000de terminated.
Jan 31 09:16:49 compute-0 systemd-machined[212769]: New machine qemu-106-instance-000000df.
Jan 31 09:18:50 compute-0 systemd-machined[212769]: Machine qemu-106-instance-000000df terminated.

Unit systemd-networkd-wait-online.service could not be found.
● systemd-modules-load.service - Load Kernel Modules
     Loaded: loaded (/usr/lib/systemd/system/systemd-modules-load.service; static)
     Active: active (exited) since Sat 2026-01-31 07:37:52 UTC; 1h 59min ago
       Docs: man:systemd-modules-load.service(8)
             man:modules-load.d(5)
   Main PID: 229287 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Jan 31 07:37:52 compute-0 systemd[1]: Starting Load Kernel Modules...
Jan 31 07:37:52 compute-0 systemd[1]: Finished Load Kernel Modules.

● systemd-network-generator.service - Generate network units from Kernel command line
     Loaded: loaded (/usr/lib/systemd/system/systemd-network-generator.service; enabled; preset: enabled)
     Active: active (exited) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd-network-generator.service(8)
   Main PID: 675 (code=exited, status=0/SUCCESS)
        CPU: 6ms

○ systemd-pcrmachine.service - TPM2 PCR Machine ID Measurement
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrmachine.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd-pcrmachine.service(8)

○ systemd-pcrphase-initrd.service - TPM2 PCR Barrier (initrd)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-initrd.service; static)
     Active: inactive (dead)
       Docs: man:systemd-pcrphase-initrd.service(8)

○ systemd-pcrphase-sysinit.service - TPM2 PCR Barrier (Initialization)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-sysinit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
       Docs: man:systemd-pcrphase-sysinit.service(8)

○ systemd-pcrphase.service - TPM2 PCR Barrier (User)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:25:01 UTC; 3h 11min ago
       Docs: man:systemd-pcrphase.service(8)

○ systemd-pstore.service - Platform Persistent Storage Archival
     Loaded: loaded (/usr/lib/systemd/system/systemd-pstore.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd-pstore(8)

● systemd-random-seed.service - Load/Save OS Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-random-seed.service; static)
     Active: active (exited) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd-random-seed.service(8)
             man:random(4)
   Main PID: 685 (code=exited, status=0/SUCCESS)
        CPU: 5ms

● systemd-remount-fs.service - Remount Root and Kernel File Systems
     Loaded: loaded (/usr/lib/systemd/system/systemd-remount-fs.service; enabled-runtime; preset: disabled)
     Active: active (exited) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd-remount-fs.service(8)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
   Main PID: 676 (code=exited, status=0/SUCCESS)
        CPU: 12ms

○ systemd-repart.service - Repartition Root Disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-repart.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd-repart.service(8)

○ systemd-rfkill.service - Load/Save RF Kill Switch Status
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-rfkill.socket
       Docs: man:systemd-rfkill.service(8)

● systemd-sysctl.service - Apply Kernel Variables
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysctl.service; static)
     Active: active (exited) since Sat 2026-01-31 07:14:40 UTC; 2h 22min ago
       Docs: man:systemd-sysctl.service(8)
             man:sysctl.d(5)
   Main PID: 44983 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Jan 31 07:14:40 compute-0 systemd[1]: Starting Apply Kernel Variables...
Jan 31 07:14:40 compute-0 systemd[1]: Finished Apply Kernel Variables.
Unit systemd-timesyncd.service could not be found.
Unit systemd-tmpfiles.service could not be found.

○ systemd-sysext.service - Merge System Extension Images into /usr/ and /opt/
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysext.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd-sysext.service(8)

● systemd-sysusers.service - Create System Users
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysusers.service; static)
     Active: active (exited) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:sysusers.d(5)
             man:systemd-sysusers.service(8)
   Main PID: 686 (code=exited, status=0/SUCCESS)
        CPU: 14ms

○ systemd-tmpfiles-clean.service - Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:40:37 UTC; 2h 56min ago
TriggeredBy: ● systemd-tmpfiles-clean.timer
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 7481 (code=exited, status=0/SUCCESS)
        CPU: 25ms

● systemd-tmpfiles-setup-dev.service - Create Static Device Nodes in /dev
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup-dev.service; static)
     Active: active (exited) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 688 (code=exited, status=0/SUCCESS)
        CPU: 29ms

● systemd-tmpfiles-setup.service - Create Volatile Files and Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup.service; static)
     Active: active (exited) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 692 (code=exited, status=0/SUCCESS)
        CPU: 58ms

● systemd-udev-settle.service - Wait for udev To Complete Device Initialization
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-settle.service; static)
     Active: active (exited) since Sat 2026-01-31 07:37:47 UTC; 1h 59min ago
       Docs: man:systemd-udev-settle.service(8)
   Main PID: 228325 (code=exited, status=0/SUCCESS)
        CPU: 20ms

Jan 31 07:37:47 compute-0 systemd[1]: Starting Wait for udev To Complete Device Initialization...
Jan 31 07:37:47 compute-0 udevadm[228325]: systemd-udev-settle.service is deprecated. Please fix multipathd.service not to pull it in.
Jan 31 07:37:47 compute-0 systemd[1]: Finished Wait for udev To Complete Device Initialization.

● systemd-udev-trigger.service - Coldplug All udev Devices
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-trigger.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-udev-trigger.service.d
             └─systemd-udev-trigger-no-reload.conf
     Active: active (exited) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:udev(7)
             man:systemd-udevd.service(8)
   Main PID: 680 (code=exited, status=0/SUCCESS)
        CPU: 59ms

● systemd-udevd.service - Rule-based Manager for Device Events and Files
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd.service; static)
     Active: active (running) since Sat 2026-01-31 06:25:00 UTC; 3h 12min ago
TriggeredBy: ● systemd-udevd-kernel.socket
             ● systemd-udevd-control.socket
       Docs: man:systemd-udevd.service(8)
             man:udev(7)
   Main PID: 725 (systemd-udevd)
     Status: "Processing with 32 children at max"
         IO: 58.5M read, 17.3M written
      Tasks: 1
     Memory: 36.8M (peak: 93.2M)
        CPU: 18.154s
     CGroup: /system.slice/systemd-udevd.service
             └─udev
               └─725 /usr/lib/systemd/systemd-udevd

Jan 31 09:10:40 compute-0 systemd-udevd[402732]: Network interface NamePolicy= disabled on kernel command line.
Jan 31 09:13:01 compute-0 systemd-udevd[406266]: Network interface NamePolicy= disabled on kernel command line.
Jan 31 09:13:01 compute-0 systemd-udevd[406268]: Network interface NamePolicy= disabled on kernel command line.
Jan 31 09:15:29 compute-0 systemd-udevd[410089]: Network interface NamePolicy= disabled on kernel command line.
Unit tlp.service could not be found.
Jan 31 09:15:29 compute-0 systemd-udevd[410093]: Network interface NamePolicy= disabled on kernel command line.
Jan 31 09:16:49 compute-0 systemd-udevd[412121]: Network interface NamePolicy= disabled on kernel command line.
Jan 31 09:20:15 compute-0 lvm[416735]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Jan 31 09:20:15 compute-0 lvm[416735]: VG ceph_vg0 finished
Jan 31 09:36:29 compute-0 lvm[443179]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Jan 31 09:36:29 compute-0 lvm[443179]: VG ceph_vg0 finished

● systemd-update-done.service - Update is Completed
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-done.service; static)
     Active: active (exited) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
       Docs: man:systemd-update-done.service(8)
   Main PID: 763 (code=exited, status=0/SUCCESS)
        CPU: 9ms

○ systemd-update-utmp-runlevel.service - Record Runlevel Change in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp-runlevel.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:25:03 UTC; 3h 11min ago
       Docs: man:systemd-update-utmp-runlevel.service(8)
             man:utmp(5)
   Main PID: 1017 (code=exited, status=0/SUCCESS)
        CPU: 8ms

● systemd-update-utmp.service - Record System Boot/Shutdown in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp.service; static)
     Active: active (exited) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd-update-utmp.service(8)
             man:utmp(5)
   Main PID: 724 (code=exited, status=0/SUCCESS)
        CPU: 6ms

● systemd-user-sessions.service - Permit User Sessions
     Loaded: loaded (/usr/lib/systemd/system/systemd-user-sessions.service; static)
     Active: active (exited) since Sat 2026-01-31 06:25:03 UTC; 3h 11min ago
       Docs: man:systemd-user-sessions.service(8)
   Main PID: 1000 (code=exited, status=0/SUCCESS)
        CPU: 8ms

○ systemd-vconsole-setup.service - Setup Virtual Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-vconsole-setup.service; static)
     Active: inactive (dead) since Sat 2026-01-31 06:24:58 UTC; 3h 12min ago
   Duration: 1.532s
       Docs: man:systemd-vconsole-setup.service(8)
             man:vconsole.conf(5)
   Main PID: 311 (code=exited, status=0/SUCCESS)
        CPU: 163ms

Jan 31 06:24:56 localhost systemd[1]: Finished Setup Virtual Console.
Jan 31 06:24:58 localhost systemd[1]: systemd-vconsole-setup.service: Deactivated successfully.
Jan 31 06:24:58 localhost systemd[1]: Stopped Setup Virtual Console.
Notice: journal has been rotated since unit was started, output may be incomplete.

● tuned.service - Dynamic System Tuning Daemon
     Loaded: loaded (/usr/lib/systemd/system/tuned.service; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 07:26:24 UTC; 2h 10min ago
       Docs: man:tuned(8)
             man:tuned.conf(5)
             man:tuned-adm(8)
   Main PID: 108256 (tuned)
         IO: 0B read, 0B written
      Tasks: 4 (limit: 48560)
     Memory: 13.7M (peak: 16.1M)
        CPU: 2.362s
     CGroup: /system.slice/tuned.service
             └─108256 /usr/bin/python3 -Es /usr/sbin/tuned -l -P

Jan 31 07:26:24 compute-0 systemd[1]: Starting Dynamic System Tuning Daemon...
Jan 31 07:26:24 compute-0 systemd[1]: Started Dynamic System Tuning Daemon.

○ unbound-anchor.service - update of the root trust anchor for DNSSEC validation in unbound
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.service; static)
     Active: inactive (dead)
TriggeredBy: ● unbound-anchor.timer
       Docs: man:unbound-anchor(8)

● user-runtime-dir@1000.service - User Runtime Directory /run/user/1000
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Sat 2026-01-31 06:31:00 UTC; 3h 5min ago
       Docs: man:user@.service(5)
   Main PID: 4305 (code=exited, status=0/SUCCESS)
        CPU: 10ms

● user-runtime-dir@42477.service - User Runtime Directory /run/user/42477
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Sat 2026-01-31 07:20:43 UTC; 2h 16min ago
       Docs: man:user@.service(5)
   Main PID: 76059 (code=exited, status=0/SUCCESS)
        CPU: 11ms

Jan 31 07:20:43 compute-0 systemd[1]: Starting User Runtime Directory /run/user/42477...
Jan 31 07:20:43 compute-0 systemd[1]: Finished User Runtime Directory /run/user/42477.

● user@1000.service - User Manager for UID 1000
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Sat 2026-01-31 06:31:01 UTC; 3h 5min ago
       Docs: man:user@.service(5)
   Main PID: 4306 (systemd)
     Status: "Ready."
         IO: 676.0K read, 4.0K written
      Tasks: 5
     Memory: 9.6M (peak: 13.6M)
        CPU: 7.614s
     CGroup: /user.slice/user-1000.slice/user@1000.service
             ├─app.slice
             │ └─dbus-broker.service
             │   ├─20820 /usr/bin/dbus-broker-launch --scope user
             │   └─20831 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             ├─init.scope
             │ ├─4306 /usr/lib/systemd/systemd --user
             │ └─4308 "(sd-pam)"
             └─user.slice
               └─podman-pause-d63f6fad.scope
                 └─20760 catatonit -P

● user@42477.service - User Manager for UID 42477
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Sat 2026-01-31 07:20:44 UTC; 2h 16min ago
       Docs: man:user@.service(5)
   Main PID: 76060 (systemd)
     Status: "Ready."
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 7.6M (peak: 10.9M)
        CPU: 5.870s
     CGroup: /user.slice/user-42477.slice/user@42477.service
             └─init.scope
               ├─76060 /usr/lib/systemd/systemd --user
               └─76062 "(sd-pam)"

Jan 31 07:20:44 compute-0 systemd[76060]: Reached target Sockets.
Jan 31 07:20:44 compute-0 systemd[76060]: Reached target Basic System.
Jan 31 07:20:44 compute-0 systemd[76060]: Reached target Main User Target.
Jan 31 07:20:44 compute-0 systemd[76060]: Startup finished in 127ms.
Jan 31 07:20:44 compute-0 systemd[1]: Started User Manager for UID 42477.
Jan 31 07:22:44 compute-0 systemd[76060]: Starting Mark boot as successful...
Jan 31 07:22:44 compute-0 systemd[76060]: Finished Mark boot as successful.
Jan 31 07:25:47 compute-0 systemd[76060]: Created slice User Background Tasks Slice.
Jan 31 07:25:47 compute-0 systemd[76060]: Starting Cleanup of User's Temporary Files and Directories...
Jan 31 07:25:47 compute-0 systemd[76060]: Finished Cleanup of User's Temporary Files and Directories.

○ virtinterfaced.service - libvirt interface daemon
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtinterfaced.socket
             ○ virtinterfaced-admin.socket
             ○ virtinterfaced-ro.socket
       Docs: man:virtinterfaced(8)
             https://libvirt.org/

○ virtlockd.service - libvirt locking daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ● virtlockd.socket
             ○ virtlockd-admin.socket
       Docs: man:virtlockd(8)
             https://libvirt.org/

● virtlogd.service - libvirt logging daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:36:13 UTC; 2h 0min ago
TriggeredBy: ● virtlogd.socket
             ● virtlogd-admin.socket
       Docs: man:virtlogd(8)
             https://libvirt.org/
   Main PID: 212140 (virtlogd)
         IO: 644.0K read, 8.2M written
      Tasks: 1 (limit: 48560)
     Memory: 4.3M (peak: 4.9M)
        CPU: 1min 41.255s
     CGroup: /system.slice/virtlogd.service
             └─212140 /usr/sbin/virtlogd

Jan 31 07:36:13 compute-0 systemd[1]: Starting libvirt logging daemon...
Jan 31 07:36:13 compute-0 systemd[1]: Started libvirt logging daemon.

○ virtnetworkd.service - libvirt network daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnetworkd-ro.socket
             ○ virtnetworkd.socket
             ○ virtnetworkd-admin.socket
       Docs: man:virtnetworkd(8)
             https://libvirt.org/

● virtnodedevd.service - libvirt nodedev daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:40:03 UTC; 1h 56min ago
TriggeredBy: ● virtnodedevd-admin.socket
             ● virtnodedevd-ro.socket
             ● virtnodedevd.socket
       Docs: man:virtnodedevd(8)
             https://libvirt.org/
   Main PID: 247723 (virtnodedevd)
         IO: 1.3M read, 0B written
      Tasks: 20 (limit: 48560)
     Memory: 7.2M (peak: 8.9M)
        CPU: 7.725s
     CGroup: /system.slice/virtnodedevd.service
             └─247723 /usr/sbin/virtnodedevd --timeout 120

Jan 31 08:39:13 compute-0 virtnodedevd[247723]: libvirt version: 11.10.0, package: 2.el9 (builder@centos.org, 2025-12-18-15:09:54, )
Jan 31 08:39:13 compute-0 virtnodedevd[247723]: hostname: compute-0
Jan 31 08:39:13 compute-0 virtnodedevd[247723]: ethtool ioctl error on tap77125c39-f9: No such device
Jan 31 08:39:13 compute-0 virtnodedevd[247723]: ethtool ioctl error on tap77125c39-f9: No such device
Jan 31 08:39:13 compute-0 virtnodedevd[247723]: ethtool ioctl error on tap77125c39-f9: No such device
Jan 31 08:39:13 compute-0 virtnodedevd[247723]: ethtool ioctl error on tap77125c39-f9: No such device
Jan 31 08:39:13 compute-0 virtnodedevd[247723]: ethtool ioctl error on tap77125c39-f9: No such device
Jan 31 08:39:13 compute-0 virtnodedevd[247723]: ethtool ioctl error on tap77125c39-f9: No such device
Jan 31 08:39:13 compute-0 virtnodedevd[247723]: ethtool ioctl error on tap77125c39-f9: No such device
Jan 31 08:39:13 compute-0 virtnodedevd[247723]: ethtool ioctl error on tap77125c39-f9: No such device

○ virtnwfilterd.service - libvirt nwfilter daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnwfilterd-ro.socket
             ○ virtnwfilterd-admin.socket
             ○ virtnwfilterd.socket
       Docs: man:virtnwfilterd(8)
             https://libvirt.org/

○ virtproxyd.service - libvirt proxy daemon
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.service; enabled; preset: disabled)
     Active: inactive (dead) since Sat 2026-01-31 07:38:15 UTC; 1h 58min ago
   Duration: 2min 17ms
TriggeredBy: ● virtproxyd-ro.socket
             ● virtproxyd-tls.socket
             ● virtproxyd.socket
             ● virtproxyd-admin.socket
       Docs: man:virtproxyd(8)
             https://libvirt.org/
   Main PID: 212557 (code=exited, status=0/SUCCESS)
        CPU: 53ms

Jan 31 07:36:15 compute-0 systemd[1]: Starting libvirt proxy daemon...
Jan 31 07:36:15 compute-0 systemd[1]: Started libvirt proxy daemon.
Jan 31 07:38:15 compute-0 systemd[1]: virtproxyd.service: Deactivated successfully.

● virtqemud.service - libvirt QEMU daemon
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.service; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 07:39:58 UTC; 1h 57min ago
TriggeredBy: ● virtqemud-admin.socket
             ● virtqemud-ro.socket
             ● virtqemud.socket
       Docs: man:virtqemud(8)
             https://libvirt.org/
   Main PID: 247123 (virtqemud)
         IO: 2.8M read, 137.0M written
      Tasks: 19 (limit: 32768)
     Memory: 27.9M (peak: 98.1M)
        CPU: 22.696s
Unit ypbind.service could not be found.
Unit yppasswdd.service could not be found.
Unit ypserv.service could not be found.
Unit ypxfrd.service could not be found.
     CGroup: /system.slice/virtqemud.service
             └─247123 /usr/sbin/virtqemud --timeout 120

Jan 31 08:45:51 compute-0 virtqemud[247123]: argument unsupported: QEMU guest agent is not configured
Jan 31 08:50:06 compute-0 virtqemud[247123]: argument unsupported: QEMU guest agent is not configured
Jan 31 08:52:18 compute-0 virtqemud[247123]: argument unsupported: QEMU guest agent is not configured
Jan 31 09:20:14 compute-0 virtqemud[247123]: Failed to connect socket to '/var/run/libvirt/virtnetworkd-sock-ro': No such file or directory
Jan 31 09:20:14 compute-0 virtqemud[247123]: Failed to connect socket to '/var/run/libvirt/virtnwfilterd-sock-ro': No such file or directory
Jan 31 09:20:14 compute-0 virtqemud[247123]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory
Jan 31 09:20:46 compute-0 virtqemud[247123]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory
Jan 31 09:36:29 compute-0 virtqemud[247123]: Failed to connect socket to '/var/run/libvirt/virtnetworkd-sock-ro': No such file or directory
Jan 31 09:36:29 compute-0 virtqemud[247123]: Failed to connect socket to '/var/run/libvirt/virtnwfilterd-sock-ro': No such file or directory
Jan 31 09:36:29 compute-0 virtqemud[247123]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory

● virtsecretd.service - libvirt secret daemon
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.service; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:44:52 UTC; 1h 52min ago
TriggeredBy: ● virtsecretd-admin.socket
             ● virtsecretd-ro.socket
             ● virtsecretd.socket
       Docs: man:virtsecretd(8)
             https://libvirt.org/
   Main PID: 253144 (virtsecretd)
         IO: 0B read, 0B written
      Tasks: 18 (limit: 48560)
     Memory: 4.1M (peak: 5.1M)
        CPU: 1.078s
     CGroup: /system.slice/virtsecretd.service
             └─253144 /usr/sbin/virtsecretd --timeout 120

Jan 31 07:44:52 compute-0 systemd[1]: Starting libvirt secret daemon...
Jan 31 07:44:52 compute-0 systemd[1]: Started libvirt secret daemon.

○ virtstoraged.service - libvirt storage daemon
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtstoraged.socket
             ○ virtstoraged-ro.socket
             ○ virtstoraged-admin.socket
       Docs: man:virtstoraged(8)
             https://libvirt.org/

● -.slice - Root Slice
     Loaded: loaded
     Active: active since Sat 2026-01-31 06:24:55 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:55 UTC; 3h 12min ago
       Docs: man:systemd.special(7)
      Tasks: 1306
     Memory: 3.8G
        CPU: 1h 36min 9.300s
     CGroup: /
             ├─449424 turbostat --debug sleep 10
             ├─449429 sleep 10
             ├─init.scope
             │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
             ├─machine.slice
             │ ├─libpod-16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87.scope
             │ │ └─container
             │ │   ├─247401 dumb-init --single-child -- kolla_start
             │ │   ├─247403 /usr/bin/python3 /usr/bin/nova-compute
             │ │   ├─253078 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmpkn8xotgf/privsep.sock
             │ │   ├─254621 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmpuc93k029/privsep.sock
             │ │   └─307490 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmp1z21xzi6/privsep.sock
             │ ├─libpod-1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52.scope
             │ │ └─container
             │ │   ├─159731 dumb-init --single-child -- kolla_start
             │ │   ├─159734 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │ │   ├─159995 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │ │   ├─160056 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp07osgxqf/privsep.sock
             │ │   ├─253234 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpnmaxlr7e/privsep.sock
             │ │   └─253297 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpyqrzsz_e/privsep.sock
             │ └─libpod-d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613.scope
             │   └─container
             │     ├─149154 dumb-init --single-child -- kolla_start
             │     └─149157 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
             ├─system.slice
             │ ├─NetworkManager.service
             │ │ └─49013 /usr/sbin/NetworkManager --no-daemon
             │ ├─auditd.service
             │ │ ├─697 /sbin/auditd
             │ │ └─699 /usr/sbin/sedispatch
             │ ├─chronyd.service
             │ │ └─58578 /usr/sbin/chronyd -F 2
             │ ├─crond.service
             │ │ └─1002 /usr/sbin/crond -n
             │ ├─dbus-broker.service
             │ │ ├─791 /usr/bin/dbus-broker-launch --scope system --audit
             │ │ └─808 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             │ ├─edpm_nova_compute.service
             │ │ └─247399 /usr/bin/conmon --api-version 1 -c 16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87 -u 16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87/userdata -p /run/containers/storage/overlay-containers/16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87
             │ ├─edpm_ovn_controller.service
             │ │ └─149152 /usr/bin/conmon --api-version 1 -c d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613 -u d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613/userdata -p /run/containers/storage/overlay-containers/d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613
             │ ├─edpm_ovn_metadata_agent.service
             │ │ └─159729 /usr/bin/conmon --api-version 1 -c 1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52 -u 1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52/userdata -p /run/containers/storage/overlay-containers/1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52
             │ ├─gssproxy.service
             │ │ └─871 /usr/sbin/gssproxy -D
             │ ├─irqbalance.service
             │ │ └─813 /usr/sbin/irqbalance
             │ ├─iscsid.service
             │ │ └─230940 /usr/sbin/iscsid -f
             │ ├─multipathd.service
             │ │ └─231100 /sbin/multipathd -d -s
             │ ├─ovs-vswitchd.service
             │ │ └─47313 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             │ ├─ovsdb-server.service
             │ │ └─47231 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             │ ├─polkit.service
             │ │ └─43497 /usr/lib/polkit-1/polkitd --no-debug
             │ ├─rpcbind.service
             │ │ └─695 /usr/bin/rpcbind -w -f
             │ ├─rsyslog.service
             │ │ └─998 /usr/sbin/rsyslogd -n
             │ ├─sshd.service
             │ │ └─186742 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             │ ├─system-ceph\x2d2f5ab832\x2d5f2e\x2d5a84\x2dbd93\x2dcf8bab960ee2.slice
             │ │ ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@crash.compute-0.service
             │ │ │ ├─libpod-payload-287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44
             │ │ │ │ ├─81766 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ │ └─81774 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ └─runtime
             │ │ │   └─81764 /usr/bin/conmon --api-version 1 -c 287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44 -u 287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44/userdata -p /run/containers/storage/overlay-containers/287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44
             │ │ ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@haproxy.rgw.default.compute-0.evwczw.service
             │ │ │ ├─libpod-payload-e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe
             │ │ │ │ ├─96044 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ │ │ │ ├─96046 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ │ │ └─96048 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ │ └─runtime
             │ │ │   └─96042 /usr/bin/conmon --api-version 1 -c e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe -u e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe/userdata -p /run/containers/storage/overlay-containers/e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-haproxy-rgw-default-compute-0-evwczw --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@haproxy.rgw.default.compute-0.evwczw.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe
             │ │ ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@keepalived.rgw.default.compute-0.wujrgc.service
             │ │ │ ├─libpod-payload-9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790
             │ │ │ │ ├─96458 /run/podman-init -- ./init.sh
             │ │ │ │ ├─96460 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ │ │ └─96462 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ │ └─runtime
             │ │ │   └─96456 /usr/bin/conmon --api-version 1 -c 9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790 -u 9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790/userdata -p /run/containers/storage/overlay-containers/9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-keepalived-rgw-default-compute-0-wujrgc --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@keepalived.rgw.default.compute-0.wujrgc.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790
             │ │ ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mds.cephfs.compute-0.jroeqh.service
             │ │ │ ├─libpod-payload-41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e
             │ │ │ │ ├─94916 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.jroeqh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─94918 /usr/bin/ceph-mds -n mds.cephfs.compute-0.jroeqh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─94914 /usr/bin/conmon --api-version 1 -c 41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e -u 41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e/userdata -p /run/containers/storage/overlay-containers/41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-mds-cephfs-compute-0-jroeqh --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mds.cephfs.compute-0.jroeqh.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e
             │ │ ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mgr.compute-0.ddmhwk.service
             │ │ │ ├─libpod-payload-27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583
             │ │ │ │ ├─74687 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.ddmhwk -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─74689 /usr/bin/ceph-mgr -n mgr.compute-0.ddmhwk -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─74685 /usr/bin/conmon --api-version 1 -c 27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583 -u 27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583/userdata -p /run/containers/storage/overlay-containers/27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-mgr-compute-0-ddmhwk --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mgr.compute-0.ddmhwk.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583
             │ │ ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mon.compute-0.service
             │ │ │ ├─libpod-payload-8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86
             │ │ │ │ ├─74392 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ │ └─74394 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─74390 /usr/bin/conmon --api-version 1 -c 8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86 -u 8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86/userdata -p /run/containers/storage/overlay-containers/8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86
             │ │ ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@osd.0.service
             │ │ │ ├─libpod-payload-af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1
             │ │ │ │ ├─84878 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─84880 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─84876 /usr/bin/conmon --api-version 1 -c af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1 -u af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1/userdata -p /run/containers/storage/overlay-containers/af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1
             │ │ └─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@rgw.rgw.compute-0.pnpmok.service
             │ │   ├─libpod-payload-3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9
             │ │   │ ├─94349 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.pnpmok -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │   │ └─94351 /usr/bin/radosgw -n client.rgw.rgw.compute-0.pnpmok -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │   └─runtime
             │ │     └─94347 /usr/bin/conmon --api-version 1 -c 3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9 -u 3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9/userdata -p /run/containers/storage/overlay-containers/3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-rgw-rgw-compute-0-pnpmok --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@rgw.rgw.compute-0.pnpmok.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9
             │ ├─system-getty.slice
             │ │ └─getty@tty1.service
             │ │   └─1003 /sbin/agetty -o "-p -- \\u" --noclear - linux
             │ ├─system-serial\x2dgetty.slice
             │ │ └─serial-getty@ttyS0.service
             │ │   └─1004 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             │ ├─systemd-hostnamed.service
             │ │ └─444949 /usr/lib/systemd/systemd-hostnamed
             │ ├─systemd-journald.service
             │ │ └─674 /usr/lib/systemd/systemd-journald
             │ ├─systemd-logind.service
             │ │ └─818 /usr/lib/systemd/systemd-logind
             │ ├─systemd-machined.service
             │ │ └─212769 /usr/lib/systemd/systemd-machined
             │ ├─systemd-udevd.service
             │ │ └─udev
             │ │   └─725 /usr/lib/systemd/systemd-udevd
             │ ├─tuned.service
             │ │ └─108256 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             │ ├─virtlogd.service
             │ │ └─212140 /usr/sbin/virtlogd
             │ ├─virtnodedevd.service
             │ │ └─247723 /usr/sbin/virtnodedevd --timeout 120
             │ ├─virtqemud.service
             │ │ └─247123 /usr/sbin/virtqemud --timeout 120
             │ └─virtsecretd.service
             │   └─253144 /usr/sbin/virtsecretd --timeout 120
             └─user.slice
               ├─user-1000.slice
               │ ├─session-1.scope
               │ │ └─4518 /usr/bin/python3
               │ ├─session-78.scope
               │ │ ├─442577 "sshd-session: zuul [priv]"
               │ │ ├─442580 "sshd-session: zuul@notty"
               │ │ ├─442581 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
               │ │ ├─442605 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
               │ │ ├─449422 timeout 15s turbostat --debug sleep 10
               │ │ ├─449779 timeout 300s systemctl status --all
               │ │ ├─449781 systemctl status --all
               │ │ ├─449827 timeout 300s semanage module -l
               │ │ ├─449828 /usr/bin/python3 -EsI /usr/sbin/semanage module -l
               │ │ ├─449829 timeout 300s ceph osd perf --format json-pretty
               │ │ └─449830 /usr/bin/python3 -s /usr/bin/ceph osd perf --format json-pretty
               │ └─user@1000.service
               │   ├─app.slice
               │   │ └─dbus-broker.service
               │   │   ├─20820 /usr/bin/dbus-broker-launch --scope user
               │   │   └─20831 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               │   ├─init.scope
               │   │ ├─4306 /usr/lib/systemd/systemd --user
               │   │ └─4308 "(sd-pam)"
               │   └─user.slice
               │     └─podman-pause-d63f6fad.scope
               │       └─20760 catatonit -P
               └─user-42477.slice
                 ├─session-21.scope
                 │ ├─76056 "sshd-session: ceph-admin [priv]"
                 │ └─76078 "sshd-session: ceph-admin"
                 ├─session-23.scope
                 │ ├─76073 "sshd-session: ceph-admin [priv]"
                 │ └─76079 "sshd-session: ceph-admin@notty"
                 ├─session-24.scope
                 │ ├─76130 "sshd-session: ceph-admin [priv]"
                 │ └─76133 "sshd-session: ceph-admin@notty"
                 ├─session-25.scope
                 │ ├─76184 "sshd-session: ceph-admin [priv]"
                 │ └─76187 "sshd-session: ceph-admin@notty"
                 ├─session-26.scope
                 │ ├─76238 "sshd-session: ceph-admin [priv]"
                 │ └─76241 "sshd-session: ceph-admin@notty"
                 ├─session-27.scope
                 │ ├─76292 "sshd-session: ceph-admin [priv]"
                 │ └─76295 "sshd-session: ceph-admin@notty"
                 ├─session-28.scope
                 │ ├─76346 "sshd-session: ceph-admin [priv]"
                 │ └─76349 "sshd-session: ceph-admin@notty"
                 ├─session-29.scope
                 │ ├─76400 "sshd-session: ceph-admin [priv]"
                 │ └─76403 "sshd-session: ceph-admin@notty"
                 ├─session-30.scope
                 │ ├─76454 "sshd-session: ceph-admin [priv]"
                 │ └─76457 "sshd-session: ceph-admin@notty"
                 ├─session-31.scope
                 │ ├─76508 "sshd-session: ceph-admin [priv]"
                 │ └─76511 "sshd-session: ceph-admin@notty"
                 ├─session-32.scope
                 │ ├─76535 "sshd-session: ceph-admin [priv]"
                 │ └─76538 "sshd-session: ceph-admin@notty"
                 ├─session-33.scope
                 │ ├─76589 "sshd-session: ceph-admin [priv]"
                 │ └─76592 "sshd-session: ceph-admin@notty"
                 └─user@42477.service
                   └─init.scope
                     ├─76060 /usr/lib/systemd/systemd --user
                     └─76062 "(sd-pam)"

Jan 31 09:36:52 compute-0 systemd[1]: Started libpod-conmon-8c5a3ca73977d107763f6ed286ee477512d39185c4dce9219f645fb98284ffef.scope.
Jan 31 09:36:52 compute-0 systemd[1]: Started libcrun container.
Jan 31 09:36:52 compute-0 systemd[1]: libpod-8c5a3ca73977d107763f6ed286ee477512d39185c4dce9219f645fb98284ffef.scope: Deactivated successfully.
Jan 31 09:36:52 compute-0 systemd[1]: var-lib-containers-storage-overlay-2e5e6ad8884dbbeef499e54d58bcad7c5080f9ece4332a0f13d317225b91af9a-merged.mount: Deactivated successfully.
Jan 31 09:36:52 compute-0 systemd[1]: libpod-conmon-8c5a3ca73977d107763f6ed286ee477512d39185c4dce9219f645fb98284ffef.scope: Deactivated successfully.
Jan 31 09:36:53 compute-0 systemd[1]: Started libpod-conmon-52d0c0ac81b961a974a80e50ed2724c529d249c83767a41c57b0fe1b833d3e0d.scope.
Jan 31 09:36:53 compute-0 systemd[1]: Started libcrun container.
Jan 31 09:36:53 compute-0 systemd[1]: libpod-52d0c0ac81b961a974a80e50ed2724c529d249c83767a41c57b0fe1b833d3e0d.scope: Deactivated successfully.
Jan 31 09:36:53 compute-0 systemd[1]: var-lib-containers-storage-overlay-62330815f9f7a6a73d1693c90e625546c3e1011796b61560a5984393623fe9e2-merged.mount: Deactivated successfully.
Jan 31 09:36:54 compute-0 systemd[1]: libpod-conmon-52d0c0ac81b961a974a80e50ed2724c529d249c83767a41c57b0fe1b833d3e0d.scope: Deactivated successfully.

● machine.slice - Virtual Machine and Container Slice
     Loaded: loaded (/usr/lib/systemd/system/machine.slice; static)
     Active: active since Sat 2026-01-31 07:19:41 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:19:41 UTC; 2h 17min ago
       Docs: man:systemd.special(7)
         IO: 286.1M read, 163.5M written
      Tasks: 47
     Memory: 1.3G (peak: 2.1G)
        CPU: 37min 41.404s
     CGroup: /machine.slice
             ├─libpod-16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87.scope
             │ └─container
             │   ├─247401 dumb-init --single-child -- kolla_start
             │   ├─247403 /usr/bin/python3 /usr/bin/nova-compute
             │   ├─253078 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmpkn8xotgf/privsep.sock
             │   ├─254621 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmpuc93k029/privsep.sock
             │   └─307490 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmp1z21xzi6/privsep.sock
             ├─libpod-1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52.scope
             │ └─container
             │   ├─159731 dumb-init --single-child -- kolla_start
             │   ├─159734 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │   ├─159995 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │   ├─160056 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp07osgxqf/privsep.sock
             │   ├─253234 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpnmaxlr7e/privsep.sock
             │   └─253297 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpyqrzsz_e/privsep.sock
             └─libpod-d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613.scope
               └─container
                 ├─149154 dumb-init --single-child -- kolla_start
                 └─149157 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt

Jan 31 09:36:52 compute-0 loving_galileo[448533]: 167 167
Jan 31 09:36:53 compute-0 trusting_shannon[448652]: {
Jan 31 09:36:53 compute-0 trusting_shannon[448652]:     "69ce1ba1-37ea-44ee-8e02-ae107b60d956": {
Jan 31 09:36:53 compute-0 trusting_shannon[448652]:         "ceph_fsid": "2f5ab832-5f2e-5a84-bd93-cf8bab960ee2",
Jan 31 09:36:53 compute-0 trusting_shannon[448652]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Jan 31 09:36:53 compute-0 trusting_shannon[448652]:         "osd_id": 0,
Jan 31 09:36:53 compute-0 trusting_shannon[448652]:         "osd_uuid": "69ce1ba1-37ea-44ee-8e02-ae107b60d956",
Jan 31 09:36:53 compute-0 trusting_shannon[448652]:         "type": "bluestore"
Jan 31 09:36:53 compute-0 trusting_shannon[448652]:     }
Jan 31 09:36:53 compute-0 trusting_shannon[448652]: }

● system-ceph\x2d2f5ab832\x2d5f2e\x2d5a84\x2dbd93\x2dcf8bab960ee2.slice - Slice /system/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2
     Loaded: loaded
     Active: active since Sat 2026-01-31 07:19:45 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:19:45 UTC; 2h 17min ago
         IO: 3.4G read, 12.0G written
      Tasks: 877
     Memory: 2.2G (peak: 2.4G)
        CPU: 9min 35.525s
     CGroup: /system.slice/system-ceph\x2d2f5ab832\x2d5f2e\x2d5a84\x2dbd93\x2dcf8bab960ee2.slice
             ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@crash.compute-0.service
             │ ├─libpod-payload-287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44
             │ │ ├─81766 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ └─81774 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ └─runtime
             │   └─81764 /usr/bin/conmon --api-version 1 -c 287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44 -u 287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44/userdata -p /run/containers/storage/overlay-containers/287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44
             ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@haproxy.rgw.default.compute-0.evwczw.service
             │ ├─libpod-payload-e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe
             │ │ ├─96044 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ │ ├─96046 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ └─96048 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ └─runtime
             │   └─96042 /usr/bin/conmon --api-version 1 -c e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe -u e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe/userdata -p /run/containers/storage/overlay-containers/e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-haproxy-rgw-default-compute-0-evwczw --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@haproxy.rgw.default.compute-0.evwczw.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe
             ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@keepalived.rgw.default.compute-0.wujrgc.service
             │ ├─libpod-payload-9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790
             │ │ ├─96458 /run/podman-init -- ./init.sh
             │ │ ├─96460 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ └─96462 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ └─runtime
             │   └─96456 /usr/bin/conmon --api-version 1 -c 9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790 -u 9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790/userdata -p /run/containers/storage/overlay-containers/9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-keepalived-rgw-default-compute-0-wujrgc --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@keepalived.rgw.default.compute-0.wujrgc.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790
             ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mds.cephfs.compute-0.jroeqh.service
             │ ├─libpod-payload-41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e
             │ │ ├─94916 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.jroeqh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─94918 /usr/bin/ceph-mds -n mds.cephfs.compute-0.jroeqh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─94914 /usr/bin/conmon --api-version 1 -c 41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e -u 41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e/userdata -p /run/containers/storage/overlay-containers/41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-mds-cephfs-compute-0-jroeqh --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mds.cephfs.compute-0.jroeqh.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e
             ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mgr.compute-0.ddmhwk.service
             │ ├─libpod-payload-27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583
             │ │ ├─74687 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.ddmhwk -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─74689 /usr/bin/ceph-mgr -n mgr.compute-0.ddmhwk -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─74685 /usr/bin/conmon --api-version 1 -c 27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583 -u 27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583/userdata -p /run/containers/storage/overlay-containers/27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-mgr-compute-0-ddmhwk --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mgr.compute-0.ddmhwk.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583
             ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mon.compute-0.service
             │ ├─libpod-payload-8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86
             │ │ ├─74392 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─74394 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─runtime
             │   └─74390 /usr/bin/conmon --api-version 1 -c 8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86 -u 8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86/userdata -p /run/containers/storage/overlay-containers/8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86
             ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@osd.0.service
             │ ├─libpod-payload-af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1
             │ │ ├─84878 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─84880 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─84876 /usr/bin/conmon --api-version 1 -c af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1 -u af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1/userdata -p /run/containers/storage/overlay-containers/af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1
             └─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@rgw.rgw.compute-0.pnpmok.service
               ├─libpod-payload-3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9
               │ ├─94349 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.pnpmok -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
               │ └─94351 /usr/bin/radosgw -n client.rgw.rgw.compute-0.pnpmok -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
               └─runtime
                 └─94347 /usr/bin/conmon --api-version 1 -c 3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9 -u 3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9/userdata -p /run/containers/storage/overlay-containers/3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-rgw-rgw-compute-0-pnpmok --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@rgw.rgw.compute-0.pnpmok.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9

Jan 31 09:36:59 compute-0 ceph-mon[74394]: from='client.? 192.168.122.100:0/4105510240' entity='client.admin' cmd=[{"prefix": "osd blocklist ls", "format": "json-pretty"}]: dispatch
Jan 31 09:36:59 compute-0 ceph-mon[74394]: from='client.? 192.168.122.101:0/2241390496' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json-pretty"}]: dispatch
Jan 31 09:36:59 compute-0 ceph-mon[74394]: from='client.? 192.168.122.102:0/1983683427' entity='client.admin' cmd=[{"prefix": "fs ls", "format": "json-pretty"}]: dispatch
Jan 31 09:36:59 compute-0 ceph-mon[74394]: from='client.? 192.168.122.101:0/2481987779' entity='client.admin' cmd=[{"prefix": "osd blocklist ls", "format": "json-pretty"}]: dispatch
Jan 31 09:36:59 compute-0 radosgw[94351]: ====== starting new request req=0x7fb0bb49a6f0 =====
Jan 31 09:36:59 compute-0 radosgw[94351]: ====== req done req=0x7fb0bb49a6f0 op status=0 http_status=200 latency=0.000000000s ======
Jan 31 09:36:59 compute-0 radosgw[94351]: beast: 0x7fb0bb49a6f0: 192.168.122.100 - anonymous [31/Jan/2026:09:36:59.803 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.000000000s
Jan 31 09:36:59 compute-0 ceph-mon[74394]: mon.compute-0@0(leader) e3 handle_command mon_command({"prefix": "osd numa-status", "format": "json-pretty"} v 0) v1
Jan 31 09:36:59 compute-0 ceph-mon[74394]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/3537445942' entity='client.admin' cmd=[{"prefix": "osd numa-status", "format": "json-pretty"}]: dispatch
Jan 31 09:36:59 compute-0 ceph-mgr[74689]: log_channel(audit) log [DBG] : from='client.46058 -' entity='client.admin' cmd=[{"prefix": "osd df", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch

● system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice - Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged
     Loaded: loaded
     Active: active since Sat 2026-01-31 07:36:15 UTC; 2h 0min ago
      Until: Sat 2026-01-31 07:36:15 UTC; 2h 0min ago
         IO: 4.0K read, 0B written
      Tasks: 0
     Memory: 4.0K (peak: 58.7M)
        CPU: 936ms
     CGroup: /system.slice/system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice

Jan 31 07:36:15 compute-0 systemd[1]: Created slice Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged.

● system-getty.slice - Slice /system/getty
     Loaded: loaded
     Active: active since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 248.0K (peak: 580.0K)
        CPU: 7ms
     CGroup: /system.slice/system-getty.slice
             └─getty@tty1.service
               └─1003 /sbin/agetty -o "-p -- \\u" --noclear - linux

● system-modprobe.slice - Slice /system/modprobe
     Loaded: loaded
     Active: active since Sat 2026-01-31 06:24:57 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:57 UTC; 3h 12min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 52.0K (peak: 11.5M)
        CPU: 116ms
     CGroup: /system.slice/system-modprobe.slice

Jan 31 06:24:57 localhost systemd[1]: Created slice Slice /system/modprobe.

● system-serial\x2dgetty.slice - Slice /system/serial-getty
     Loaded: loaded
     Active: active since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 300.0K (peak: 544.0K)
        CPU: 5ms
     CGroup: /system.slice/system-serial\x2dgetty.slice
             └─serial-getty@ttyS0.service
               └─1004 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

● system-sshd\x2dkeygen.slice - Slice /system/sshd-keygen
     Loaded: loaded
     Active: active since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 0B (peak: 0B)
        CPU: 0
     CGroup: /system.slice/system-sshd\x2dkeygen.slice

● system.slice - System Slice
     Loaded: loaded
     Active: active since Sat 2026-01-31 06:24:55 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:55 UTC; 3h 12min ago
       Docs: man:systemd.special(7)
         IO: 3.5G read, 12.3G written
      Tasks: 1007
     Memory: 3.0G (peak: 3.2G)
        CPU: 18min 29.157s
     CGroup: /system.slice
             ├─NetworkManager.service
             │ └─49013 /usr/sbin/NetworkManager --no-daemon
             ├─auditd.service
             │ ├─697 /sbin/auditd
             │ └─699 /usr/sbin/sedispatch
             ├─chronyd.service
             │ └─58578 /usr/sbin/chronyd -F 2
             ├─crond.service
             │ └─1002 /usr/sbin/crond -n
             ├─dbus-broker.service
             │ ├─791 /usr/bin/dbus-broker-launch --scope system --audit
             │ └─808 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             ├─edpm_nova_compute.service
             │ └─247399 /usr/bin/conmon --api-version 1 -c 16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87 -u 16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87/userdata -p /run/containers/storage/overlay-containers/16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 16f3cf77eee09ab7709c67d820482fbb1b64354510093143a021a8f1ec2ccd87
             ├─edpm_ovn_controller.service
             │ └─149152 /usr/bin/conmon --api-version 1 -c d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613 -u d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613/userdata -p /run/containers/storage/overlay-containers/d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613
             ├─edpm_ovn_metadata_agent.service
             │ └─159729 /usr/bin/conmon --api-version 1 -c 1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52 -u 1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52/userdata -p /run/containers/storage/overlay-containers/1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52
             ├─gssproxy.service
             │ └─871 /usr/sbin/gssproxy -D
             ├─irqbalance.service
             │ └─813 /usr/sbin/irqbalance
             ├─iscsid.service
             │ └─230940 /usr/sbin/iscsid -f
             ├─multipathd.service
             │ └─231100 /sbin/multipathd -d -s
             ├─ovs-vswitchd.service
             │ └─47313 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             ├─ovsdb-server.service
             │ └─47231 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             ├─polkit.service
             │ └─43497 /usr/lib/polkit-1/polkitd --no-debug
             ├─rpcbind.service
             │ └─695 /usr/bin/rpcbind -w -f
             ├─rsyslog.service
             │ └─998 /usr/sbin/rsyslogd -n
             ├─sshd.service
             │ └─186742 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             ├─system-ceph\x2d2f5ab832\x2d5f2e\x2d5a84\x2dbd93\x2dcf8bab960ee2.slice
             │ ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@crash.compute-0.service
             │ │ ├─libpod-payload-287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44
             │ │ │ ├─81766 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ └─81774 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ └─runtime
             │ │   └─81764 /usr/bin/conmon --api-version 1 -c 287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44 -u 287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44/userdata -p /run/containers/storage/overlay-containers/287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 287b42eb8058430f66d49fad6a858b93e85e13d224b0307dc6db988fdd517c44
             │ ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@haproxy.rgw.default.compute-0.evwczw.service
             │ │ ├─libpod-payload-e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe
             │ │ │ ├─96044 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ │ │ ├─96046 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ │ └─96048 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ └─runtime
             │ │   └─96042 /usr/bin/conmon --api-version 1 -c e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe -u e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe/userdata -p /run/containers/storage/overlay-containers/e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-haproxy-rgw-default-compute-0-evwczw --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@haproxy.rgw.default.compute-0.evwczw.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e85bd24af30cc47f8586afc6e524047e12ba657e07d03a65812cdd8f8d12c0fe
             │ ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@keepalived.rgw.default.compute-0.wujrgc.service
             │ │ ├─libpod-payload-9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790
             │ │ │ ├─96458 /run/podman-init -- ./init.sh
             │ │ │ ├─96460 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ │ └─96462 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ └─runtime
             │ │   └─96456 /usr/bin/conmon --api-version 1 -c 9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790 -u 9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790/userdata -p /run/containers/storage/overlay-containers/9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-keepalived-rgw-default-compute-0-wujrgc --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@keepalived.rgw.default.compute-0.wujrgc.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 9f037d7b549dfea18e07bcd207334bdb6be640ac888fb6a410ce9d0db364b790
             │ ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mds.cephfs.compute-0.jroeqh.service
             │ │ ├─libpod-payload-41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e
             │ │ │ ├─94916 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.jroeqh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─94918 /usr/bin/ceph-mds -n mds.cephfs.compute-0.jroeqh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─94914 /usr/bin/conmon --api-version 1 -c 41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e -u 41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e/userdata -p /run/containers/storage/overlay-containers/41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-mds-cephfs-compute-0-jroeqh --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mds.cephfs.compute-0.jroeqh.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 41a77f8e2fff2b8ec08ee3239657a26d889a4c43656e169833ff12edd4843c0e
             │ ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mgr.compute-0.ddmhwk.service
             │ │ ├─libpod-payload-27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583
             │ │ │ ├─74687 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.ddmhwk -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─74689 /usr/bin/ceph-mgr -n mgr.compute-0.ddmhwk -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─74685 /usr/bin/conmon --api-version 1 -c 27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583 -u 27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583/userdata -p /run/containers/storage/overlay-containers/27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-mgr-compute-0-ddmhwk --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mgr.compute-0.ddmhwk.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 27d29d56922973df23bd6f1e58ddc4d731ae614392da047fc3969a22adc43583
             │ ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mon.compute-0.service
             │ │ ├─libpod-payload-8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86
             │ │ │ ├─74392 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─74394 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─runtime
             │ │   └─74390 /usr/bin/conmon --api-version 1 -c 8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86 -u 8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86/userdata -p /run/containers/storage/overlay-containers/8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8a056797e460928a5aeedd3e2298bafe24e7f52c20f4a7487148750cf8988d86
             │ ├─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@osd.0.service
             │ │ ├─libpod-payload-af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1
             │ │ │ ├─84878 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─84880 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─84876 /usr/bin/conmon --api-version 1 -c af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1 -u af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1/userdata -p /run/containers/storage/overlay-containers/af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg af2e3815ded07052013af44e961eb30d6aebcbabf404f7e8579600fe4cb398c1
             │ └─ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@rgw.rgw.compute-0.pnpmok.service
             │   ├─libpod-payload-3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9
             │   │ ├─94349 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.pnpmok -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │   │ └─94351 /usr/bin/radosgw -n client.rgw.rgw.compute-0.pnpmok -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │   └─runtime
             │     └─94347 /usr/bin/conmon --api-version 1 -c 3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9 -u 3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9/userdata -p /run/containers/storage/overlay-containers/3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9/userdata/pidfile -n ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2-rgw-rgw-compute-0-pnpmok --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9/userdata/oci-log --conmon-pidfile /run/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2@rgw.rgw.compute-0.pnpmok.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3d2c60d2a43b72ac915708b2ea14f4ac8b5ff4660c01be5af30a821a7121dba9
             ├─system-getty.slice
             │ └─getty@tty1.service
             │   └─1003 /sbin/agetty -o "-p -- \\u" --noclear - linux
             ├─system-serial\x2dgetty.slice
             │ └─serial-getty@ttyS0.service
             │   └─1004 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             ├─systemd-hostnamed.service
             │ └─444949 /usr/lib/systemd/systemd-hostnamed
             ├─systemd-journald.service
             │ └─674 /usr/lib/systemd/systemd-journald
             ├─systemd-logind.service
             │ └─818 /usr/lib/systemd/systemd-logind
             ├─systemd-machined.service
             │ └─212769 /usr/lib/systemd/systemd-machined
             ├─systemd-udevd.service
             │ └─udev
             │   └─725 /usr/lib/systemd/systemd-udevd
             ├─tuned.service
             │ └─108256 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             ├─virtlogd.service
             │ └─212140 /usr/sbin/virtlogd
             ├─virtnodedevd.service
             │ └─247723 /usr/sbin/virtnodedevd --timeout 120
             ├─virtqemud.service
             │ └─247123 /usr/sbin/virtqemud --timeout 120
             └─virtsecretd.service
               └─253144 /usr/sbin/virtsecretd --timeout 120

Jan 31 09:36:56 compute-0 nova_compute[247399]: 2026-01-31 09:36:56.129 247403 DEBUG nova.compute.manager [None req-85be7847-7ccd-4892-a7aa-161de3959a02 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477[00m
Jan 31 09:36:57 compute-0 nova_compute[247399]: 2026-01-31 09:36:57.393 247403 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Jan 31 09:36:58 compute-0 nova_compute[247399]: 2026-01-31 09:36:58.193 247403 DEBUG oslo_service.periodic_task [None req-85be7847-7ccd-4892-a7aa-161de3959a02 - - - - - -] Running periodic task ComputeManager._check_instance_build_time run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Jan 31 09:36:58 compute-0 nova_compute[247399]: 2026-01-31 09:36:58.197 247403 DEBUG oslo_service.periodic_task [None req-85be7847-7ccd-4892-a7aa-161de3959a02 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Jan 31 09:36:58 compute-0 nova_compute[247399]: 2026-01-31 09:36:58.198 247403 DEBUG nova.compute.manager [None req-85be7847-7ccd-4892-a7aa-161de3959a02 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858[00m
Jan 31 09:36:58 compute-0 nova_compute[247399]: 2026-01-31 09:36:58.198 247403 DEBUG nova.compute.manager [None req-85be7847-7ccd-4892-a7aa-161de3959a02 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862[00m
Jan 31 09:36:58 compute-0 nova_compute[247399]: 2026-01-31 09:36:58.354 247403 DEBUG nova.compute.manager [None req-85be7847-7ccd-4892-a7aa-161de3959a02 - - - - - -] Didn't find any instances for network info cache update. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9944[00m
Jan 31 09:36:59 compute-0 nova_compute[247399]: 2026-01-31 09:36:59.197 247403 DEBUG oslo_service.periodic_task [None req-85be7847-7ccd-4892-a7aa-161de3959a02 - - - - - -] Running periodic task ComputeManager._poll_rebooting_instances run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Jan 31 09:36:59 compute-0 nova_compute[247399]: 2026-01-31 09:36:59.198 247403 DEBUG oslo_service.periodic_task [None req-85be7847-7ccd-4892-a7aa-161de3959a02 - - - - - -] Running periodic task ComputeManager._poll_volume_usage run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Jan 31 09:36:59 compute-0 nova_compute[247399]: 2026-01-31 09:36:59.907 247403 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m

● user-1000.slice - User Slice of UID 1000
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Sat 2026-01-31 06:31:00 UTC; 3h 5min ago
      Until: Sat 2026-01-31 06:31:00 UTC; 3h 5min ago
       Docs: man:user@.service(5)
         IO: 539.0M read, 8.3G written
      Tasks: 37 (limit: 20031)
     Memory: 1.9G (peak: 4.5G)
        CPU: 20min 59.412s
     CGroup: /user.slice/user-1000.slice
             ├─session-1.scope
             │ └─4518 /usr/bin/python3
             ├─session-78.scope
             │ ├─442577 "sshd-session: zuul [priv]"
             │ ├─442580 "sshd-session: zuul@notty"
             │ ├─442581 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ ├─442605 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ ├─449422 timeout 15s turbostat --debug sleep 10
             │ ├─449779 timeout 300s systemctl status --all
             │ ├─449781 systemctl status --all
             │ ├─449827 timeout 300s semanage module -l
             │ ├─449828 /usr/bin/python3 -EsI /usr/sbin/semanage module -l
             │ ├─449829 timeout 300s ceph osd perf --format json-pretty
             │ └─449830 /usr/bin/python3 -s /usr/bin/ceph osd perf --format json-pretty
             └─user@1000.service
               ├─app.slice
               │ └─dbus-broker.service
               │   ├─20820 /usr/bin/dbus-broker-launch --scope user
               │   └─20831 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               ├─init.scope
               │ ├─4306 /usr/lib/systemd/systemd --user
               │ └─4308 "(sd-pam)"
               └─user.slice
                 └─podman-pause-d63f6fad.scope
                   └─20760 catatonit -P

Jan 31 09:21:33 compute-0 sshd-session[424537]: Disconnected from user zuul 192.168.122.10 port 39858
Jan 31 09:21:33 compute-0 sudo[424567]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/rm -rf /var/tmp/sos-osp
Jan 31 09:21:33 compute-0 sudo[424567]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Jan 31 09:21:33 compute-0 sudo[424567]: pam_unix(sudo:session): session closed for user root
Jan 31 09:21:33 compute-0 sshd-session[424566]: Received disconnect from 192.168.122.10 port 39860:11: disconnected by user
Jan 31 09:21:33 compute-0 sshd-session[424566]: Disconnected from user zuul 192.168.122.10 port 39860
Jan 31 09:36:23 compute-0 sudo[442581]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Jan 31 09:36:23 compute-0 sudo[442581]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Jan 31 09:36:52 compute-0 ovs-appctl[448538]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Jan 31 09:36:52 compute-0 ovs-appctl[448545]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

● user-42477.slice - User Slice of UID 42477
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Sat 2026-01-31 07:20:43 UTC; 2h 16min ago
      Until: Sat 2026-01-31 07:20:43 UTC; 2h 16min ago
       Docs: man:user@.service(5)
         IO: 224.0K read, 688.3M written
      Tasks: 26 (limit: 20031)
     Memory: 299.3M (peak: 466.5M)
        CPU: 8min 24.940s
     CGroup: /user.slice/user-42477.slice
             ├─session-21.scope
             │ ├─76056 "sshd-session: ceph-admin [priv]"
             │ └─76078 "sshd-session: ceph-admin"
             ├─session-23.scope
             │ ├─76073 "sshd-session: ceph-admin [priv]"
             │ └─76079 "sshd-session: ceph-admin@notty"
             ├─session-24.scope
             │ ├─76130 "sshd-session: ceph-admin [priv]"
             │ └─76133 "sshd-session: ceph-admin@notty"
             ├─session-25.scope
             │ ├─76184 "sshd-session: ceph-admin [priv]"
             │ └─76187 "sshd-session: ceph-admin@notty"
             ├─session-26.scope
             │ ├─76238 "sshd-session: ceph-admin [priv]"
             │ └─76241 "sshd-session: ceph-admin@notty"
             ├─session-27.scope
             │ ├─76292 "sshd-session: ceph-admin [priv]"
             │ └─76295 "sshd-session: ceph-admin@notty"
             ├─session-28.scope
             │ ├─76346 "sshd-session: ceph-admin [priv]"
             │ └─76349 "sshd-session: ceph-admin@notty"
             ├─session-29.scope
             │ ├─76400 "sshd-session: ceph-admin [priv]"
             │ └─76403 "sshd-session: ceph-admin@notty"
             ├─session-30.scope
             │ ├─76454 "sshd-session: ceph-admin [priv]"
             │ └─76457 "sshd-session: ceph-admin@notty"
             ├─session-31.scope
             │ ├─76508 "sshd-session: ceph-admin [priv]"
             │ └─76511 "sshd-session: ceph-admin@notty"
             ├─session-32.scope
             │ ├─76535 "sshd-session: ceph-admin [priv]"
             │ └─76538 "sshd-session: ceph-admin@notty"
             ├─session-33.scope
             │ ├─76589 "sshd-session: ceph-admin [priv]"
             │ └─76592 "sshd-session: ceph-admin@notty"
             └─user@42477.service
               └─init.scope
                 ├─76060 /usr/lib/systemd/systemd --user
                 └─76062 "(sd-pam)"

Jan 31 09:36:53 compute-0 podman[448609]: 2026-01-31 09:36:53.199539136 +0000 UTC m=+0.125531881 container attach 52d0c0ac81b961a974a80e50ed2724c529d249c83767a41c57b0fe1b833d3e0d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_shannon, CEPH_REF=reef, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9)
Jan 31 09:36:53 compute-0 podman[448609]: 2026-01-31 09:36:53.968702237 +0000 UTC m=+0.894694952 container died 52d0c0ac81b961a974a80e50ed2724c529d249c83767a41c57b0fe1b833d3e0d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_shannon, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, ceph=True, org.label-schema.license=GPLv2)
Jan 31 09:36:54 compute-0 podman[448609]: 2026-01-31 09:36:54.027626451 +0000 UTC m=+0.953619176 container remove 52d0c0ac81b961a974a80e50ed2724c529d249c83767a41c57b0fe1b833d3e0d (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=trusting_shannon, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/)
Jan 31 09:36:54 compute-0 sudo[448282]: pam_unix(sudo:session): session closed for user root
Jan 31 09:36:54 compute-0 sudo[449069]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 31 09:36:54 compute-0 sudo[449069]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 09:36:54 compute-0 sudo[449069]: pam_unix(sudo:session): session closed for user root
Jan 31 09:36:54 compute-0 sudo[449113]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Jan 31 09:36:54 compute-0 sudo[449113]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 31 09:36:54 compute-0 sudo[449113]: pam_unix(sudo:session): session closed for user root

● user.slice - User and Session Slice
     Loaded: loaded (/usr/lib/systemd/system/user.slice; static)
     Active: active since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd.special(7)
         IO: 539.6M read, 9.0G written
      Tasks: 63
     Memory: 2.2G (peak: 4.8G)
        CPU: 29min 26.349s
     CGroup: /user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4518 /usr/bin/python3
             │ ├─session-78.scope
             │ │ ├─442577 "sshd-session: zuul [priv]"
             │ │ ├─442580 "sshd-session: zuul@notty"
             │ │ ├─442581 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─442605 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─449422 timeout 15s turbostat --debug sleep 10
             │ │ ├─449779 timeout 300s systemctl status --all
             │ │ ├─449781 systemctl status --all
             │ │ ├─449827 timeout 300s semanage module -l
             │ │ ├─449828 /usr/bin/python3 -EsI /usr/sbin/semanage module -l
             │ │ ├─449829 timeout 300s ceph osd perf --format json-pretty
             │ │ └─449830 /usr/bin/python3 -s /usr/bin/ceph osd perf --format json-pretty
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─20820 /usr/bin/dbus-broker-launch --scope user
             │   │   └─20831 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4306 /usr/lib/systemd/systemd --user
             │   │ └─4308 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-d63f6fad.scope
             │       └─20760 catatonit -P
             └─user-42477.slice
               ├─session-21.scope
               │ ├─76056 "sshd-session: ceph-admin [priv]"
               │ └─76078 "sshd-session: ceph-admin"
               ├─session-23.scope
               │ ├─76073 "sshd-session: ceph-admin [priv]"
               │ └─76079 "sshd-session: ceph-admin@notty"
               ├─session-24.scope
               │ ├─76130 "sshd-session: ceph-admin [priv]"
               │ └─76133 "sshd-session: ceph-admin@notty"
               ├─session-25.scope
               │ ├─76184 "sshd-session: ceph-admin [priv]"
               │ └─76187 "sshd-session: ceph-admin@notty"
               ├─session-26.scope
               │ ├─76238 "sshd-session: ceph-admin [priv]"
               │ └─76241 "sshd-session: ceph-admin@notty"
               ├─session-27.scope
               │ ├─76292 "sshd-session: ceph-admin [priv]"
               │ └─76295 "sshd-session: ceph-admin@notty"
               ├─session-28.scope
               │ ├─76346 "sshd-session: ceph-admin [priv]"
               │ └─76349 "sshd-session: ceph-admin@notty"
               ├─session-29.scope
               │ ├─76400 "sshd-session: ceph-admin [priv]"
               │ └─76403 "sshd-session: ceph-admin@notty"
               ├─session-30.scope
               │ ├─76454 "sshd-session: ceph-admin [priv]"
               │ └─76457 "sshd-session: ceph-admin@notty"
               ├─session-31.scope
               │ ├─76508 "sshd-session: ceph-admin [priv]"
               │ └─76511 "sshd-session: ceph-admin@notty"
               ├─session-32.scope
               │ ├─76535 "sshd-session: ceph-admin [priv]"
               │ └─76538 "sshd-session: ceph-admin@notty"
               ├─session-33.scope
               │ ├─76589 "sshd-session: ceph-admin [priv]"
               │ └─76592 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─76060 /usr/lib/systemd/systemd --user
                   └─76062 "(sd-pam)"

● dbus.socket - D-Bus System Message Bus Socket
     Loaded: loaded (/usr/lib/systemd/system/dbus.socket; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
   Triggers: ● dbus-broker.service
     Listen: /run/dbus/system_bus_socket (Stream)
     CGroup: /system.slice/dbus.socket

● dm-event.socket - Device-mapper event daemon FIFOs
     Loaded: loaded (/usr/lib/systemd/system/dm-event.socket; enabled; preset: enabled)
     Active: active (listening) since Sat 2026-01-31 07:12:17 UTC; 2h 24min ago
      Until: Sat 2026-01-31 07:12:17 UTC; 2h 24min ago
   Triggers: ● dm-event.service
       Docs: man:dmeventd(8)
     Listen: /run/dmeventd-server (FIFO)
             /run/dmeventd-client (FIFO)
     CGroup: /system.slice/dm-event.socket

● iscsid.socket - Open-iSCSI iscsid Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsid.socket; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 07:37:17 UTC; 1h 59min ago
      Until: Sat 2026-01-31 07:37:17 UTC; 1h 59min ago
   Triggers: ● iscsid.service
       Docs: man:iscsid(8)
             man:iscsiadm(8)
     Listen: @ISCSIADM_ABSTRACT_NAMESPACE (Stream)
     CGroup: /system.slice/iscsid.socket

Jan 31 07:37:17 compute-0 systemd[1]: Listening on Open-iSCSI iscsid Socket.

○ iscsiuio.socket - Open-iSCSI iscsiuio Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● iscsiuio.service
       Docs: man:iscsiuio(8)
     Listen: @ISCSID_UIP_ABSTRACT_NAMESPACE (Stream)

○ libvirtd-admin.socket - libvirt legacy monolithic daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-admin.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)

○ libvirtd-ro.socket - libvirt legacy monolithic daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-ro.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)

○ libvirtd-tls.socket
     Loaded: masked (Reason: Unit libvirtd-tls.socket is masked.)
     Active: inactive (dead)

○ libvirtd.socket - libvirt legacy monolithic daemon socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock (Stream)

● lvm2-lvmpolld.socket - LVM2 poll daemon socket
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.socket; enabled; preset: enabled)
     Active: active (listening) since Sat 2026-01-31 07:12:17 UTC; 2h 24min ago
      Until: Sat 2026-01-31 07:12:17 UTC; 2h 24min ago
   Triggers: ● lvm2-lvmpolld.service
       Docs: man:lvmpolld(8)
     Listen: /run/lvm/lvmpolld.socket (Stream)
     CGroup: /system.slice/lvm2-lvmpolld.socket

● multipathd.socket - multipathd control socket
     Loaded: loaded (/usr/lib/systemd/system/multipathd.socket; enabled; preset: disabled)
     Active: active (running) since Sat 2026-01-31 07:37:45 UTC; 1h 59min ago
      Until: Sat 2026-01-31 07:37:45 UTC; 1h 59min ago
   Triggers: ● multipathd.service
     Listen: @/org/kernel/linux/storage/multipathd (Stream)
             /run/multipathd.socket (Stream)
     CGroup: /system.slice/multipathd.socket

Jan 31 07:37:45 compute-0 systemd[1]: Listening on multipathd control socket.

● rpcbind.socket - RPCbind Server Activation Socket
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.socket; enabled; preset: enabled)
     Active: active (running) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
   Triggers: ● rpcbind.service
     Listen: /run/rpcbind.sock (Stream)
             0.0.0.0:111 (Stream)
             0.0.0.0:111 (Datagram)
             [::]:111 (Stream)
             [::]:111 (Datagram)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 16.0K (peak: 288.0K)
        CPU: 2ms
     CGroup: /system.slice/rpcbind.socket

● sssd-kcm.socket - SSSD Kerberos Cache Manager responder socket
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.socket; enabled; preset: enabled)
     Active: active (listening) since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
   Triggers: ● sssd-kcm.service
       Docs: man:sssd-kcm(8)
     Listen: /run/.heim_org.h5l.kcm-socket (Stream)
     CGroup: /system.slice/sssd-kcm.socket

○ syslog.socket - Syslog Socket
     Loaded: loaded (/usr/lib/systemd/system/syslog.socket; static)
     Active: inactive (dead)
   Triggers: ● syslog.service
       Docs: man:systemd.special(7)
             https://www.freedesktop.org/wiki/Software/systemd/syslog
     Listen: /run/systemd/journal/syslog (Datagram)

● systemd-coredump.socket - Process Core Dump Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-coredump.socket; static)
     Active: active (listening) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd-coredump(8)
     Listen: /run/systemd/coredump (SequentialPacket)
   Accepted: 0; Connected: 0;
     CGroup: /system.slice/systemd-coredump.socket

● systemd-initctl.socket - initctl Compatibility Named Pipe
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.socket; static)
     Active: active (listening) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
   Triggers: ● systemd-initctl.service
       Docs: man:systemd-initctl.socket(8)
     Listen: /run/initctl (FIFO)
     CGroup: /system.slice/systemd-initctl.socket

● systemd-journald-dev-log.socket - Journal Socket (/dev/log)
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald-dev-log.socket; static)
     Active: active (running) since Sat 2026-01-31 06:24:55 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:55 UTC; 3h 12min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/dev-log (Datagram)
     CGroup: /system.slice/systemd-journald-dev-log.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-journald.socket - Journal Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.socket; static)
     Active: active (running) since Sat 2026-01-31 06:24:55 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:55 UTC; 3h 12min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/socket (Datagram)
             /run/systemd/journal/stdout (Stream)
     CGroup: /system.slice/systemd-journald.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-rfkill.socket - Load/Save RF Kill Switch Status /dev/rfkill Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.socket; static)
     Active: active (listening) since Sat 2026-01-31 06:25:01 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:01 UTC; 3h 11min ago
   Triggers: ● systemd-rfkill.service
       Docs: man:systemd-rfkill.socket(8)
     Listen: /dev/rfkill (Special)
     CGroup: /system.slice/systemd-rfkill.socket

● systemd-udevd-control.socket - udev Control Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-control.socket; static)
     Active: active (running) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-control.socket(8)
             man:udev(7)
     Listen: /run/udev/control (SequentialPacket)
     CGroup: /system.slice/systemd-udevd-control.socket

● systemd-udevd-kernel.socket - udev Kernel Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-kernel.socket; static)
     Active: active (running) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-kernel.socket(8)
             man:udev(7)
     Listen: kobject-uevent 1 (Netlink)
     CGroup: /system.slice/systemd-udevd-kernel.socket

○ virtinterfaced-admin.socket - libvirt interface daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-admin-sock (Stream)

○ virtinterfaced-ro.socket - libvirt interface daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock-ro (Stream)

○ virtinterfaced.socket - libvirt interface daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock (Stream)

○ virtlockd-admin.socket - libvirt locking daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-admin-sock (Stream)

● virtlockd.socket - libvirt locking daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.socket; enabled; preset: enabled)
     Active: active (listening) since Sat 2026-01-31 07:36:17 UTC; 2h 0min ago
      Until: Sat 2026-01-31 07:36:17 UTC; 2h 0min ago
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-sock (Stream)
     CGroup: /system.slice/virtlockd.socket

Jan 31 07:36:17 compute-0 systemd[1]: Listening on libvirt locking daemon socket.

● virtlogd-admin.socket - libvirt logging daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd-admin.socket.d
             └─override.conf
     Active: active (running) since Sat 2026-01-31 07:36:13 UTC; 2h 0min ago
      Until: Sat 2026-01-31 07:36:13 UTC; 2h 0min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtlogd-admin.socket

Jan 31 07:36:13 compute-0 systemd[1]: Starting libvirt logging daemon admin socket...
Jan 31 07:36:13 compute-0 systemd[1]: Listening on libvirt logging daemon admin socket.

● virtlogd.socket - libvirt logging daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd.socket.d
             └─override.conf
     Active: active (running) since Sat 2026-01-31 07:36:13 UTC; 2h 0min ago
      Until: Sat 2026-01-31 07:36:13 UTC; 2h 0min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtlogd.socket

Jan 31 07:36:13 compute-0 systemd[1]: Starting libvirt logging daemon socket...
Jan 31 07:36:13 compute-0 systemd[1]: Listening on libvirt logging daemon socket.

○ virtnetworkd-admin.socket - libvirt network daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-admin-sock (Stream)

○ virtnetworkd-ro.socket - libvirt network daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock-ro (Stream)

○ virtnetworkd.socket - libvirt network daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock (Stream)

● virtnodedevd-admin.socket - libvirt nodedev daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-admin.socket.d
             └─override.conf
     Active: active (running) since Sat 2026-01-31 07:36:14 UTC; 2h 0min ago
      Until: Sat 2026-01-31 07:36:14 UTC; 2h 0min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 1ms
     CGroup: /system.slice/virtnodedevd-admin.socket

Jan 31 07:36:14 compute-0 systemd[1]: Starting libvirt nodedev daemon admin socket...
Jan 31 07:36:14 compute-0 systemd[1]: Listening on libvirt nodedev daemon admin socket.

● virtnodedevd-ro.socket - libvirt nodedev daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-ro.socket.d
             └─override.conf
     Active: active (running) since Sat 2026-01-31 07:36:14 UTC; 2h 0min ago
      Until: Sat 2026-01-31 07:36:14 UTC; 2h 0min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 1ms
     CGroup: /system.slice/virtnodedevd-ro.socket

Jan 31 07:36:14 compute-0 systemd[1]: Starting libvirt nodedev daemon read-only socket...
Jan 31 07:36:14 compute-0 systemd[1]: Listening on libvirt nodedev daemon read-only socket.

● virtnodedevd.socket - libvirt nodedev daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd.socket.d
             └─override.conf
     Active: active (running) since Sat 2026-01-31 07:36:14 UTC; 2h 0min ago
      Until: Sat 2026-01-31 07:36:14 UTC; 2h 0min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 1ms
     CGroup: /system.slice/virtnodedevd.socket

Jan 31 07:36:14 compute-0 systemd[1]: Starting libvirt nodedev daemon socket...
Jan 31 07:36:14 compute-0 systemd[1]: Listening on libvirt nodedev daemon socket.

○ virtnwfilterd-admin.socket - libvirt nwfilter daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-admin-sock (Stream)

○ virtnwfilterd-ro.socket - libvirt nwfilter daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock-ro (Stream)

○ virtnwfilterd.socket - libvirt nwfilter daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock (Stream)

● virtproxyd-admin.socket - libvirt proxy daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-admin.socket.d
             └─override.conf
     Active: active (listening) since Sat 2026-01-31 07:36:15 UTC; 2h 0min ago
      Until: Sat 2026-01-31 07:36:15 UTC; 2h 0min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 500.0K)
        CPU: 2ms
     CGroup: /system.slice/virtproxyd-admin.socket

Jan 31 07:36:15 compute-0 systemd[1]: Starting libvirt proxy daemon admin socket...
Jan 31 07:36:15 compute-0 systemd[1]: Listening on libvirt proxy daemon admin socket.

● virtproxyd-ro.socket - libvirt proxy daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-ro.socket.d
             └─override.conf
     Active: active (listening) since Sat 2026-01-31 07:36:15 UTC; 2h 0min ago
      Until: Sat 2026-01-31 07:36:15 UTC; 2h 0min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 600.0K)
        CPU: 1ms
     CGroup: /system.slice/virtproxyd-ro.socket

Jan 31 07:36:15 compute-0 systemd[1]: Starting libvirt proxy daemon read-only socket...
Jan 31 07:36:15 compute-0 systemd[1]: Listening on libvirt proxy daemon read-only socket.

● virtproxyd-tls.socket - libvirt proxy daemon TLS IP socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-tls.socket; enabled; preset: disabled)
     Active: active (listening) since Sat 2026-01-31 07:35:10 UTC; 2h 1min ago
      Until: Sat 2026-01-31 07:35:10 UTC; 2h 1min ago
   Triggers: ● virtproxyd.service
     Listen: [::]:16514 (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 8.0K (peak: 272.0K)
        CPU: 1ms
     CGroup: /system.slice/virtproxyd-tls.socket

Jan 31 07:35:10 compute-0 systemd[1]: Listening on libvirt proxy daemon TLS IP socket.

● virtproxyd.socket - libvirt proxy daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd.socket.d
             └─override.conf
     Active: active (listening) since Sat 2026-01-31 07:35:10 UTC; 2h 1min ago
      Until: Sat 2026-01-31 07:35:10 UTC; 2h 1min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock (Stream)
     CGroup: /system.slice/virtproxyd.socket

Jan 31 07:35:10 compute-0 systemd[1]: Listening on libvirt proxy daemon socket.

● virtqemud-admin.socket - libvirt QEMU daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-admin.socket.d
             └─override.conf
     Active: active (running) since Sat 2026-01-31 07:36:17 UTC; 2h 0min ago
      Until: Sat 2026-01-31 07:36:17 UTC; 2h 0min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 560.0K)
        CPU: 1ms
     CGroup: /system.slice/virtqemud-admin.socket

Jan 31 07:36:17 compute-0 systemd[1]: Starting libvirt QEMU daemon admin socket...
Jan 31 07:36:17 compute-0 systemd[1]: Listening on libvirt QEMU daemon admin socket.

● virtqemud-ro.socket - libvirt QEMU daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-ro.socket.d
             └─override.conf
     Active: active (running) since Sat 2026-01-31 07:36:17 UTC; 2h 0min ago
      Until: Sat 2026-01-31 07:36:17 UTC; 2h 0min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtqemud-ro.socket

Jan 31 07:36:17 compute-0 systemd[1]: Starting libvirt QEMU daemon read-only socket...
Jan 31 07:36:17 compute-0 systemd[1]: Listening on libvirt QEMU daemon read-only socket.

● virtqemud.socket - libvirt QEMU daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud.socket.d
             └─override.conf
     Active: active (running) since Sat 2026-01-31 07:36:17 UTC; 2h 0min ago
      Until: Sat 2026-01-31 07:36:17 UTC; 2h 0min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtqemud.socket

Jan 31 07:36:17 compute-0 systemd[1]: Starting libvirt QEMU daemon socket...
Jan 31 07:36:17 compute-0 systemd[1]: Listening on libvirt QEMU daemon socket.

● virtsecretd-admin.socket - libvirt secret daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-admin.socket.d
             └─override.conf
     Active: active (running) since Sat 2026-01-31 07:36:18 UTC; 2h 0min ago
      Until: Sat 2026-01-31 07:36:18 UTC; 2h 0min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 684.0K)
        CPU: 3ms
     CGroup: /system.slice/virtsecretd-admin.socket

Jan 31 07:36:18 compute-0 systemd[1]: Starting libvirt secret daemon admin socket...
Jan 31 07:36:18 compute-0 systemd[1]: Listening on libvirt secret daemon admin socket.

● virtsecretd-ro.socket - libvirt secret daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-ro.socket.d
             └─override.conf
     Active: active (running) since Sat 2026-01-31 07:36:18 UTC; 2h 0min ago
      Until: Sat 2026-01-31 07:36:18 UTC; 2h 0min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtsecretd-ro.socket

Jan 31 07:36:18 compute-0 systemd[1]: Starting libvirt secret daemon read-only socket...
Jan 31 07:36:18 compute-0 systemd[1]: Listening on libvirt secret daemon read-only socket.

● virtsecretd.socket - libvirt secret daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd.socket.d
             └─override.conf
     Active: active (running) since Sat 2026-01-31 07:36:18 UTC; 2h 0min ago
      Until: Sat 2026-01-31 07:36:18 UTC; 2h 0min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtsecretd.socket

Jan 31 07:36:18 compute-0 systemd[1]: Starting libvirt secret daemon socket...
Jan 31 07:36:18 compute-0 systemd[1]: Listening on libvirt secret daemon socket.

○ virtstoraged-admin.socket - libvirt storage daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-admin-sock (Stream)

○ virtstoraged-ro.socket - libvirt storage daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock-ro (Stream)

○ virtstoraged.socket - libvirt storage daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock (Stream)

● swap.swap - /swap
     Loaded: loaded (/etc/fstab; generated)
     Active: active since Sat 2026-01-31 07:14:36 UTC; 2h 22min ago
      Until: Sat 2026-01-31 07:14:36 UTC; 2h 22min ago
       What: /swap
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● basic.target - Basic System
     Loaded: loaded (/usr/lib/systemd/system/basic.target; static)
     Active: active since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

○ blockdev@dev-disk-by\x2duuid-822f14ea\x2d6e7e\x2d41df\x2db0d8\x2dfbe282d9ded8.target - Block Device Preparation for /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ blockdev@dev-vda1.target - Block Device Preparation for /dev/vda1
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2.target - Ceph cluster 2f5ab832-5f2e-5a84-bd93-cf8bab960ee2
     Loaded: loaded (/etc/systemd/system/ceph-2f5ab832-5f2e-5a84-bd93-cf8bab960ee2.target; enabled; preset: disabled)
     Active: active since Sat 2026-01-31 07:19:44 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:19:44 UTC; 2h 17min ago

Jan 31 07:19:44 compute-0 systemd[1]: Reached target Ceph cluster 2f5ab832-5f2e-5a84-bd93-cf8bab960ee2.

● ceph.target - All Ceph clusters and services
     Loaded: loaded (/etc/systemd/system/ceph.target; enabled; preset: disabled)
     Active: active since Sat 2026-01-31 07:19:44 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:19:44 UTC; 2h 17min ago

Jan 31 07:19:44 compute-0 systemd[1]: Reached target All Ceph clusters and services.

● cloud-config.target - Cloud-config availability
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.target; static)
     Active: active since Sat 2026-01-31 06:25:03 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:03 UTC; 3h 11min ago

● cloud-init.target - Cloud-init target
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.target; enabled-runtime; preset: disabled)
     Active: active since Sat 2026-01-31 06:25:03 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:03 UTC; 3h 11min ago

○ cryptsetup-pre.target - Local Encrypted Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● cryptsetup.target - Local Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup.target; static)
     Active: active since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd.special(7)

● edpm_libvirt.target
     Loaded: loaded (/etc/systemd/system/edpm_libvirt.target; static)
     Active: active since Sat 2026-01-31 07:36:49 UTC; 2h 0min ago
      Until: Sat 2026-01-31 07:36:49 UTC; 2h 0min ago

Jan 31 07:36:49 compute-0 systemd[1]: Reached target edpm_libvirt.target.

○ emergency.target - Emergency Mode
     Loaded: loaded (/usr/lib/systemd/system/emergency.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ first-boot-complete.target - First Boot Complete
     Loaded: loaded (/usr/lib/systemd/system/first-boot-complete.target; static)
     Active: inactive (dead)
  Condition: start condition failed at Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd.special(7)

○ getty-pre.target - Preparation for Logins
     Loaded: loaded (/usr/lib/systemd/system/getty-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

● getty.target - Login Prompts
     Loaded: loaded (/usr/lib/systemd/system/getty.target; static)
     Active: active since Sat 2026-01-31 06:25:03 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:03 UTC; 3h 11min ago
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

○ graphical.target - Graphical Interface
     Loaded: loaded (/usr/lib/systemd/system/graphical.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ initrd-fs.target - Initrd File Systems
     Loaded: loaded (/usr/lib/systemd/system/initrd-fs.target; static)
     Active: inactive (dead) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd.special(7)

Jan 31 06:24:58 localhost systemd[1]: Reached target Initrd File Systems.

○ initrd-root-device.target - Initrd Root Device
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-device.target; static)
     Active: inactive (dead) since Sat 2026-01-31 06:24:58 UTC; 3h 12min ago
       Docs: man:systemd.special(7)

Jan 31 06:24:57 localhost systemd[1]: Reached target Initrd Root Device.
Jan 31 06:24:58 localhost systemd[1]: Stopped target Initrd Root Device.

○ initrd-root-fs.target - Initrd Root File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-fs.target; static)
     Active: inactive (dead) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd.special(7)

Jan 31 06:24:58 localhost systemd[1]: Reached target Initrd Root File System.

○ initrd-switch-root.target - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.target; static)
     Active: inactive (dead) since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago

Jan 31 06:24:58 localhost systemd[1]: Reached target Switch Root.

○ initrd-usr-fs.target - Initrd /usr File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-usr-fs.target; static)
     Active: inactive (dead) since Sat 2026-01-31 06:24:58 UTC; 3h 12min ago
       Docs: man:systemd.special(7)

Jan 31 06:24:58 localhost systemd[1]: Stopped target Initrd /usr File System.
Notice: journal has been rotated since unit was started, output may be incomplete.

○ initrd.target - Initrd Default Target
     Loaded: loaded (/usr/lib/systemd/system/initrd.target; static)
     Active: inactive (dead) since Sat 2026-01-31 06:24:58 UTC; 3h 12min ago
       Docs: man:systemd.special(7)

Jan 31 06:24:58 localhost systemd[1]: Reached target Initrd Default Target.
Jan 31 06:24:58 localhost systemd[1]: Stopped target Initrd Default Target.

● integritysetup.target - Local Integrity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/integritysetup.target; static)
     Active: active since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd.special(7)

● local-fs-pre.target - Preparation for Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs-pre.target; static)
     Active: active since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd.special(7)

● local-fs.target - Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs.target; static)
     Active: active since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd.special(7)

● multi-user.target - Multi-User System
     Loaded: loaded (/usr/lib/systemd/system/multi-user.target; indirect; preset: disabled)
     Active: active since Sat 2026-01-31 06:25:03 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:03 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

● network-online.target - Network is Online
     Loaded: loaded (/usr/lib/systemd/system/network-online.target; static)
     Active: active since Sat 2026-01-31 06:25:03 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:03 UTC; 3h 11min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

● network-pre.target - Preparation for Network
     Loaded: loaded (/usr/lib/systemd/system/network-pre.target; static)
     Active: active since Sat 2026-01-31 06:25:01 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:01 UTC; 3h 11min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

● network.target - Network
     Loaded: loaded (/usr/lib/systemd/system/network.target; static)
     Active: active since Sat 2026-01-31 06:25:01 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:01 UTC; 3h 11min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

● nfs-client.target - NFS client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-client.target; enabled; preset: disabled)
     Active: active since Sat 2026-01-31 06:25:01 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:01 UTC; 3h 11min ago

○ nss-lookup.target - Host and Network Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-lookup.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● nss-user-lookup.target - User and Group Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-user-lookup.target; static)
     Active: active since Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

● paths.target - Path Units
     Loaded: loaded (/usr/lib/systemd/system/paths.target; static)
     Active: active since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd.special(7)

○ remote-cryptsetup.target - Remote Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-cryptsetup.target; disabled; preset: enabled)
     Active: inactive (dead) since Sat 2026-01-31 06:24:58 UTC; 3h 12min ago
       Docs: man:systemd.special(7)

Jan 31 06:24:57 localhost systemd[1]: Reached target Remote Encrypted Volumes.
Jan 31 06:24:58 localhost systemd[1]: Stopped target Remote Encrypted Volumes.

● remote-fs-pre.target - Preparation for Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs-pre.target; static)
     Active: active since Sat 2026-01-31 06:25:01 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:01 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

● remote-fs.target - Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs.target; enabled; preset: enabled)
     Active: active since Sat 2026-01-31 06:25:01 UTC; 3h 11min ago
      Until: Sat 2026-01-31 06:25:01 UTC; 3h 11min ago
       Docs: man:systemd.special(7)

○ remote-veritysetup.target - Remote Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-veritysetup.target; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ rescue.target - Rescue Mode
     Loaded: loaded (/usr/lib/systemd/system/rescue.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● rpc_pipefs.target
     Loaded: loaded (/usr/lib/systemd/system/rpc_pipefs.target; static)
     Active: active since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago

● rpcbind.target - RPC Port Mapper
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.target; static)
     Active: active since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd.special(7)

○ shutdown.target - System Shutdown
     Loaded: loaded (/usr/lib/systemd/system/shutdown.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● slices.target - Slice Units
     Loaded: loaded (/usr/lib/systemd/system/slices.target; static)
     Active: active since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd.special(7)

● sockets.target - Socket Units
     Loaded: loaded (/usr/lib/systemd/system/sockets.target; static)
     Active: active since Sat 2026-01-31 06:25:00 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 12min ago
       Docs: man:systemd.special(7)

● sshd-keygen.target
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen.target; static)
     Active: active since Sat 2026-01-31 07:34:39 UTC; 2h 2min ago
      Until: Sat 2026-01-31 07:34:39 UTC; 2h 2min ago

Jan 31 07:34:39 compute-0 systemd[1]: Reached target sshd-keygen.target.

● swap.target - Swaps
     Loaded: loaded (/usr/lib/systemd/system/swap.target; static)
     Active: active since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd.special(7)

● sysinit.target - System Initialization
     Loaded: loaded (/usr/lib/systemd/system/sysinit.target; static)
     Active: active since Sat 2026-01-31 06:25:00 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 12min ago
Unit syslog.target could not be found.
       Docs: man:systemd.special(7)

● time-set.target - System Time Set
     Loaded: loaded (/usr/lib/systemd/system/time-set.target; static)
     Active: active since Sat 2026-01-31 07:19:45 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:19:45 UTC; 2h 17min ago
       Docs: man:systemd.special(7)

Jan 31 07:19:45 compute-0 systemd[1]: Reached target System Time Set.

● time-sync.target - System Time Synchronized
     Loaded: loaded (/usr/lib/systemd/system/time-sync.target; static)
     Active: active since Sat 2026-01-31 07:19:45 UTC; 2h 17min ago
      Until: Sat 2026-01-31 07:19:45 UTC; 2h 17min ago
       Docs: man:systemd.special(7)

Jan 31 07:19:45 compute-0 systemd[1]: Reached target System Time Synchronized.

● timers.target - Timer Units
     Loaded: loaded (/usr/lib/systemd/system/timers.target; static)
     Active: active since Sat 2026-01-31 06:25:00 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 12min ago
       Docs: man:systemd.special(7)

○ umount.target - Unmount All Filesystems
     Loaded: loaded (/usr/lib/systemd/system/umount.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ veritysetup-pre.target - Local Verity Protected Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/veritysetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● veritysetup.target - Local Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/veritysetup.target; static)
     Active: active since Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:24:59 UTC; 3h 12min ago
       Docs: man:systemd.special(7)

○ virt-guest-shutdown.target - Libvirt guests shutdown
     Loaded: loaded (/etc/systemd/system/virt-guest-shutdown.target; static)
     Active: inactive (dead)
       Docs: https://libvirt.org

● 1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52-1a8f1cf84db28910.timer - /usr/bin/podman healthcheck run 1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52
     Loaded: loaded (/run/systemd/transient/1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52-1a8f1cf84db28910.timer; transient)
  Transient: yes
     Active: active (waiting) since Sat 2026-01-31 07:32:28 UTC; 2h 4min ago
      Until: Sat 2026-01-31 07:32:28 UTC; 2h 4min ago
    Trigger: Sat 2026-01-31 09:37:20 UTC; 20s left
   Triggers: ● 1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52-1a8f1cf84db28910.service

Jan 31 07:32:28 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 1ae13fee90b46f5eb19bd9030483f3244c23c433b8f4e46eb5cc0bd2c6a35d52.

● d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613-280d62f938195e00.timer - /usr/bin/podman healthcheck run d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613
     Loaded: loaded (/run/systemd/transient/d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613-280d62f938195e00.timer; transient)
  Transient: yes
     Active: active (waiting) since Sat 2026-01-31 07:31:18 UTC; 2h 5min ago
      Until: Sat 2026-01-31 07:31:18 UTC; 2h 5min ago
    Trigger: Sat 2026-01-31 09:37:20 UTC; 20s left
   Triggers: ● d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613-280d62f938195e00.service

Jan 31 07:31:18 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run d068e3ccc9a9ec2248f269659070439009ec5c33951d360f88b13647cd275613.

● dnf-makecache.timer - dnf makecache --timer
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.timer; enabled; preset: enabled)
     Active: active (waiting) since Sat 2026-01-31 06:25:00 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 12min ago
    Trigger: Sat 2026-01-31 10:00:27 UTC; 23min left
   Triggers: ● dnf-makecache.service

● logrotate.timer - Daily rotation of log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.timer; enabled; preset: enabled)
     Active: active (waiting) since Sat 2026-01-31 06:25:00 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 12min ago
    Trigger: Sun 2026-02-01 00:00:00 UTC; 14h left
   Triggers: ● logrotate.service
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

○ raid-check.timer - Weekly RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.timer; enabled; preset: enabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● raid-check.service

○ sysstat-collect.timer - Run system activity accounting tool every 10 minutes
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-collect.service

○ sysstat-summary.timer - Generate summary of yesterday's process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-summary.service

● systemd-tmpfiles-clean.timer - Daily Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.timer; static)
     Active: active (waiting) since Sat 2026-01-31 06:25:00 UTC; 3h 12min ago
      Until: Sat 2026-01-31 06:25:00 UTC; 3h 12min ago
    Trigger: Sun 2026-02-01 06:40:37 UTC; 21h left
   Triggers: ● systemd-tmpfiles-clean.service
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)

● unbound-anchor.timer - daily update of the root trust anchor for DNSSEC
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.timer; enabled; preset: enabled)
     Active: active (waiting) since Sat 2026-01-31 07:15:08 UTC; 2h 21min ago
      Until: Sat 2026-01-31 07:15:08 UTC; 2h 21min ago
    Trigger: Sun 2026-02-01 00:00:00 UTC; 14h left
   Triggers: ● unbound-anchor.service
       Docs: man:unbound-anchor(8)

Jan 31 07:15:08 compute-0 systemd[1]: Started daily update of the root trust anchor for DNSSEC.
