● compute-0
    State: degraded
    Units: 454 loaded (incl. loaded aliases)
     Jobs: 0 queued
   Failed: 1 units
    Since: Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
  systemd: 252-64.el9
   CGroup: /
           ├─296661 turbostat --debug sleep 10
           ├─296664 sleep 10
           ├─init.scope
           │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
           ├─machine.slice
           │ ├─libpod-7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d.scope
           │ │ └─container
           │ │   ├─161918 dumb-init --single-child -- kolla_start
           │ │   ├─161921 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │ │   ├─162303 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │ │   ├─162436 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpm91q9wva/privsep.sock
           │ │   ├─255218 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmp5z6r0kvj/privsep.sock
           │ │   └─255276 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpv8hp88b7/privsep.sock
           │ ├─libpod-955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f.scope
           │ │ └─container
           │ │   ├─249231 dumb-init --single-child -- kolla_start
           │ │   ├─249233 /usr/bin/python3 /usr/bin/nova-compute
           │ │   ├─255097 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmp_1x_jasi/privsep.sock
           │ │   └─255486 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmpsh92gw98/privsep.sock
           │ └─libpod-ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d.scope
           │   └─container
           │     ├─151636 dumb-init --single-child -- kolla_start
           │     └─151639 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
           ├─system.slice
           │ ├─NetworkManager.service
           │ │ └─48866 /usr/sbin/NetworkManager --no-daemon
           │ ├─auditd.service
           │ │ ├─699 /sbin/auditd
           │ │ └─701 /usr/sbin/sedispatch
           │ ├─chronyd.service
           │ │ └─58433 /usr/sbin/chronyd -F 2
           │ ├─crond.service
           │ │ ├─  1008 /usr/sbin/crond -n
           │ │ └─147517 /usr/sbin/anacron -s
           │ ├─dbus-broker.service
           │ │ ├─765 /usr/bin/dbus-broker-launch --scope system --audit
           │ │ └─769 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
           │ ├─edpm_nova_compute.service
           │ │ └─249229 /usr/bin/conmon --api-version 1 -c 955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f -u 955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f/userdata -p /run/containers/storage/overlay-containers/955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f
           │ ├─edpm_ovn_controller.service
           │ │ └─151634 /usr/bin/conmon --api-version 1 -c ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d -u ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d/userdata -p /run/containers/storage/overlay-containers/ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d
           │ ├─edpm_ovn_metadata_agent.service
           │ │ └─161916 /usr/bin/conmon --api-version 1 -c 7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d -u 7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d/userdata -p /run/containers/storage/overlay-containers/7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d
           │ ├─gssproxy.service
           │ │ └─868 /usr/sbin/gssproxy -D
           │ ├─irqbalance.service
           │ │ └─779 /usr/sbin/irqbalance
           │ ├─iscsid.service
           │ │ └─233083 /usr/sbin/iscsid -f
           │ ├─multipathd.service
           │ │ └─233315 /sbin/multipathd -d -s
           │ ├─ovs-vswitchd.service
           │ │ └─47171 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
           │ ├─ovsdb-server.service
           │ │ └─47090 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
           │ ├─polkit.service
           │ │ └─43358 /usr/lib/polkit-1/polkitd --no-debug
           │ ├─rpcbind.service
           │ │ └─697 /usr/bin/rpcbind -w -f
           │ ├─rsyslog.service
           │ │ └─1003 /usr/sbin/rsyslogd -n
           │ ├─sshd.service
           │ │ └─189686 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
           │ ├─system-ceph\x2df3005f84\x2d239a\x2d55b6\x2da948\x2d8f1fb592b920.slice
           │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@alertmanager.compute-0.service
           │ │ │ ├─libpod-payload-a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982
           │ │ │ │ ├─104187 /run/podman-init -- /bin/alertmanager --cluster.listen-address=:9094 --web.listen-address=192.168.122.100:9093 --config.file=/etc/alertmanager/alertmanager.yml
           │ │ │ │ └─104189 /bin/alertmanager --cluster.listen-address=:9094 --web.listen-address=192.168.122.100:9093 --config.file=/etc/alertmanager/alertmanager.yml
           │ │ │ └─runtime
           │ │ │   └─104185 /usr/bin/conmon --api-version 1 -c a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982 -u a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982/userdata -p /run/containers/storage/overlay-containers/a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-alertmanager-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@alertmanager.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982
           │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@crash.compute-0.service
           │ │ │ ├─libpod-payload-ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21
           │ │ │ │ ├─79596 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
           │ │ │ │ └─79598 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
           │ │ │ └─runtime
           │ │ │   └─79594 /usr/bin/conmon --api-version 1 -c ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21 -u ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21/userdata -p /run/containers/storage/overlay-containers/ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21
           │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@grafana.compute-0.service
           │ │ │ ├─libpod-payload-91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28
           │ │ │ │ ├─104505 /run/podman-init -- /run.sh
           │ │ │ │ └─104507 grafana server --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini --packaging=docker cfg:default.log.mode=console cfg:default.paths.data=/var/lib/grafana cfg:default.paths.logs=/var/log/grafana cfg:default.paths.plugins=/var/lib/grafana/plugins cfg:default.paths.provisioning=/etc/grafana/provisioning
           │ │ │ └─runtime
           │ │ │   └─104501 /usr/bin/conmon --api-version 1 -c 91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28 -u 91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28/userdata -p /run/containers/storage/overlay-containers/91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-grafana-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@grafana.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28
           │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@haproxy.nfs.cephfs.compute-0.yeogal.service
           │ │ │ ├─libpod-payload-2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178
           │ │ │ │ ├─95467 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
           │ │ │ │ ├─95469 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
           │ │ │ │ └─95471 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
           │ │ │ └─runtime
           │ │ │   └─95465 /usr/bin/conmon --api-version 1 -c 2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178 -u 2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178/userdata -p /run/containers/storage/overlay-containers/2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-haproxy-nfs-cephfs-compute-0-yeogal --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@haproxy.nfs.cephfs.compute-0.yeogal.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178
           │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@haproxy.rgw.default.compute-0.qabsws.service
           │ │ │ ├─libpod-payload-872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61
           │ │ │ │ ├─97806 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
           │ │ │ │ ├─97808 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
           │ │ │ │ └─97810 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
           │ │ │ └─runtime
           │ │ │   └─97804 /usr/bin/conmon --api-version 1 -c 872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61 -u 872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61/userdata -p /run/containers/storage/overlay-containers/872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-haproxy-rgw-default-compute-0-qabsws --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@haproxy.rgw.default.compute-0.qabsws.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61
           │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@keepalived.nfs.cephfs.compute-0.lrsdkc.service
           │ │ │ ├─libpod-payload-4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609
           │ │ │ │ ├─96150 /run/podman-init -- ./init.sh
           │ │ │ │ ├─96152 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
           │ │ │ │ └─96154 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
           │ │ │ └─runtime
           │ │ │   └─96148 /usr/bin/conmon --api-version 1 -c 4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609 -u 4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609/userdata -p /run/containers/storage/overlay-containers/4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-nfs-cephfs-compute-0-lrsdkc --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@keepalived.nfs.cephfs.compute-0.lrsdkc.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609
           │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@keepalived.rgw.default.compute-0.tytkrd.service
           │ │ │ ├─libpod-payload-fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50
           │ │ │ │ ├─98096 /run/podman-init -- ./init.sh
           │ │ │ │ ├─98098 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
           │ │ │ │ └─98100 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
           │ │ │ └─runtime
           │ │ │   └─98094 /usr/bin/conmon --api-version 1 -c fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50 -u fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50/userdata -p /run/containers/storage/overlay-containers/fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-rgw-default-compute-0-tytkrd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@keepalived.rgw.default.compute-0.tytkrd.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50
           │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mds.cephfs.compute-0.ymknms.service
           │ │ │ ├─libpod-payload-e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093
           │ │ │ │ ├─94626 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.ymknms -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─94628 /usr/bin/ceph-mds -n mds.cephfs.compute-0.ymknms -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─94624 /usr/bin/conmon --api-version 1 -c e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093 -u e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093/userdata -p /run/containers/storage/overlay-containers/e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-mds-cephfs-compute-0-ymknms --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mds.cephfs.compute-0.ymknms.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093
           │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mgr.compute-0.nbdygh.service
           │ │ │ ├─libpod-payload-e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3
           │ │ │ │ ├─74631 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.nbdygh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─74633 /usr/bin/ceph-mgr -n mgr.compute-0.nbdygh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─74629 /usr/bin/conmon --api-version 1 -c e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3 -u e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3/userdata -p /run/containers/storage/overlay-containers/e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-mgr-compute-0-nbdygh --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mgr.compute-0.nbdygh.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3
           │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mon.compute-0.service
           │ │ │ ├─libpod-payload-cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6
           │ │ │ │ ├─74333 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ │ └─74335 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─74331 /usr/bin/conmon --api-version 1 -c cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6 -u cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6/userdata -p /run/containers/storage/overlay-containers/cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6
           │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@node-exporter.compute-0.service
           │ │ │ ├─libpod-payload-97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8
           │ │ │ │ ├─89474 /run/podman-init -- /bin/node_exporter --no-collector.timex --web.listen-address=:9100 --path.procfs=/host/proc --path.sysfs=/host/sys --path.rootfs=/rootfs
           │ │ │ │ └─89477 /bin/node_exporter --no-collector.timex --web.listen-address=:9100 --path.procfs=/host/proc --path.sysfs=/host/sys --path.rootfs=/rootfs
           │ │ │ └─runtime
           │ │ │   └─89471 /usr/bin/conmon --api-version 1 -c 97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8 -u 97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8/userdata -p /run/containers/storage/overlay-containers/97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-node-exporter-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@node-exporter.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8
           │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@osd.1.service
           │ │ │ ├─libpod-payload-ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b
           │ │ │ │ ├─82639 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─82641 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─82637 /usr/bin/conmon --api-version 1 -c ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b -u ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b/userdata -p /run/containers/storage/overlay-containers/ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b
           │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@prometheus.compute-0.service
           │ │ │ ├─libpod-payload-8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf
           │ │ │ │ ├─98643 /run/podman-init -- /bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=15d --storage.tsdb.retention.size=0 --web.external-url=http://compute-0.ctlplane.example.com:9095 --web.listen-address=192.168.122.100:9095
           │ │ │ │ └─98645 /bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=15d --storage.tsdb.retention.size=0 --web.external-url=http://compute-0.ctlplane.example.com:9095 --web.listen-address=192.168.122.100:9095
           │ │ │ └─runtime
           │ │ │   └─98641 /usr/bin/conmon --api-version 1 -c 8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf -u 8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf/userdata -p /run/containers/storage/overlay-containers/8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-prometheus-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@prometheus.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf
           │ │ └─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@rgw.rgw.compute-0.jbpfwf.service
           │ │   ├─libpod-payload-318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a
           │ │   │ ├─93746 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.jbpfwf -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │   │ └─93748 /usr/bin/radosgw -n client.rgw.rgw.compute-0.jbpfwf -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │   └─runtime
           │ │     └─93744 /usr/bin/conmon --api-version 1 -c 318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a -u 318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a/userdata -p /run/containers/storage/overlay-containers/318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-rgw-rgw-compute-0-jbpfwf --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@rgw.rgw.compute-0.jbpfwf.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a
           │ ├─system-getty.slice
           │ │ └─getty@tty1.service
           │ │   └─1009 /sbin/agetty -o "-p -- \\u" --noclear - linux
           │ ├─system-serial\x2dgetty.slice
           │ │ └─serial-getty@ttyS0.service
           │ │   └─1010 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
           │ ├─systemd-hostnamed.service
           │ │ └─292378 /usr/lib/systemd/systemd-hostnamed
           │ ├─systemd-journald.service
           │ │ └─675 /usr/lib/systemd/systemd-journald
           │ ├─systemd-logind.service
           │ │ └─784 /usr/lib/systemd/systemd-logind
           │ ├─systemd-machined.service
           │ │ └─216411 /usr/lib/systemd/systemd-machined
           │ ├─systemd-udevd.service
           │ │ └─udev
           │ │   └─727 /usr/lib/systemd/systemd-udevd
           │ ├─tuned.service
           │ │ └─109791 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
           │ ├─virtlogd.service
           │ │ └─215777 /usr/sbin/virtlogd
           │ ├─virtnodedevd.service
Unit boot.automount could not be found.
           │ │ └─248921 /usr/sbin/virtnodedevd --timeout 120
           │ └─virtqemud.service
           │   └─248554 /usr/sbin/virtqemud --timeout 120
           └─user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4516 /usr/bin/python3
             │ ├─session-59.scope
             │ │ ├─289862 "sshd-session: zuul [priv]"
             │ │ ├─289865 "sshd-session: zuul@notty"
             │ │ ├─289866 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─289890 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─296659 timeout 15s turbostat --debug sleep 10
             │ │ ├─297005 timeout 300s ceph osd df --format json-pretty
             │ │ ├─297006 /usr/bin/python3 -s /usr/bin/ceph osd df --format json-pretty
             │ │ ├─297027 timeout 300s semanage boolean -l
             │ │ ├─297028 /usr/bin/python3 -EsI /usr/sbin/semanage boolean -l
             │ │ ├─297031 timeout 300s systemctl status --all
             │ │ └─297032 systemctl status --all
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─11765 /usr/bin/dbus-broker-launch --scope user
             │   │   └─11778 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4304 /usr/lib/systemd/systemd --user
             │   │ └─4306 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-3184ab7f.scope
             │       └─11628 catatonit -P
             └─user-42477.slice
               ├─session-39.scope
               │ ├─99842 "sshd-session: ceph-admin [priv]"
               │ └─99848 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─90024 /usr/lib/systemd/systemd --user
                   └─90026 "(sd-pam)"

● proc-sys-fs-binfmt_misc.automount - Arbitrary Executable File Formats File System Automount Point
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.automount; static)
     Active: active (running) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
   Triggers: ● proc-sys-fs-binfmt_misc.mount
      Where: /proc/sys/fs/binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Jan 23 09:48:27 compute-0 systemd[1]: proc-sys-fs-binfmt_misc.automount: Got automount request for /proc/sys/fs/binfmt_misc, triggered by 76688 (sysctl)

● dev-cdrom.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ceph_vg0-ceph_lv0.device - /dev/ceph_vg0/ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:46:31 UTC; 54min ago
      Until: Fri 2026-01-23 09:46:31 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2ddiskseq-1.device - /dev/disk/by-diskseq/1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2ddiskseq-3.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2ddiskseq-5.device - /dev/disk/by-diskseq/5
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:46:31 UTC; 54min ago
      Until: Fri 2026-01-23 09:46:31 UTC; 54min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2ddiskseq-6.device - /dev/disk/by-diskseq/6
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:46:31 UTC; 54min ago
      Until: Fri 2026-01-23 09:46:31 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-ata\x2dQEMU_DVD\x2dROM_QM00001.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg0\x2dceph_lv0.device - /dev/disk/by-id/dm-name-ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:46:31 UTC; 54min ago
      Until: Fri 2026-01-23 09:46:31 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2dTcMco8594bPhQjwmBYpsgOTKKJfW2V9EH5XBPEipFze91kJQfGqrZdSuIGjS3jpB.device - /dev/disk/by-id/dm-uuid-LVM-TcMco8594bPhQjwmBYpsgOTKKJfW2V9EH5XBPEipFze91kJQfGqrZdSuIGjS3jpB
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:46:31 UTC; 54min ago
      Until: Fri 2026-01-23 09:46:31 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dy1xEXV\x2dhHR1\x2dUO5D\x2ds8kD\x2dfglb\x2dVdQO\x2d9MkInr.device - /dev/disk/by-id/lvm-pv-uuid-y1xEXV-hHR1-UO5D-s8kD-fglb-VdQO-9MkInr
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:46:31 UTC; 54min ago
      Until: Fri 2026-01-23 09:46:31 UTC; 54min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2dlabel-config\x2d2.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpartuuid-97d3b354\x2d01.device - /dev/disk/by-partuuid/97d3b354-01
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0.device - /dev/disk/by-path/pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0.device - /dev/disk/by-path/virtio-pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/virtio-pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2duuid-2026\x2d01\x2d23\x2d09\x2d01\x2d08\x2d00.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2duuid-22ac9141\x2d3960\x2d4912\x2db20e\x2d19fc8a328d40.device - /dev/disk/by-uuid/22ac9141-3960-4912-b20e-19fc8a328d40
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

Jan 23 09:01:21 localhost systemd[1]: Found device /dev/disk/by-uuid/22ac9141-3960-4912-b20e-19fc8a328d40.

● dev-dm\x2d0.device - /dev/dm-0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:46:31 UTC; 54min ago
      Until: Fri 2026-01-23 09:46:31 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-loop3.device - /dev/loop3
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:46:31 UTC; 54min ago
      Until: Fri 2026-01-23 09:46:31 UTC; 54min ago
     Device: /sys/devices/virtual/block/loop3

● dev-mapper-ceph_vg0\x2dceph_lv0.device - /dev/mapper/ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:46:31 UTC; 54min ago
      Until: Fri 2026-01-23 09:46:31 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-rfkill.device - /dev/rfkill
    Follows: unit currently follows state of sys-devices-virtual-misc-rfkill.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:24 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:24 UTC; 1h 40min ago
     Device: /sys/devices/virtual/misc/rfkill

● dev-sr0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ttyS0.device - /dev/ttyS0
    Follows: unit currently follows state of sys-devices-pnp0-00:00-tty-ttyS0.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

Jan 23 09:01:23 localhost systemd[1]: Condition check resulted in /dev/ttyS0 being skipped.

● dev-ttyS1.device - /dev/ttyS1
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS1.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● dev-ttyS2.device - /dev/ttyS2
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS2.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● dev-ttyS3.device - /dev/ttyS3
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS3.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● dev-vda.device - /dev/vda
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-vda1.device - /dev/vda1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device - QEMU_DVD-ROM config-2
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● sys-devices-pci0000:00-0000:00:03.0-virtio1-net-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● sys-devices-pci0000:00-0000:00:07.0-virtio5-net-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:03:30 UTC; 1h 37min ago
      Until: Fri 2026-01-23 09:03:30 UTC; 1h 37min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-devices-platform-serial8250-tty-ttyS1.device - /sys/devices/platform/serial8250/tty/ttyS1
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● sys-devices-platform-serial8250-tty-ttyS2.device - /sys/devices/platform/serial8250/tty/ttyS2
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● sys-devices-platform-serial8250-tty-ttyS3.device - /sys/devices/platform/serial8250/tty/ttyS3
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● sys-devices-pnp0-00:00-tty-ttyS0.device - /sys/devices/pnp0/00:00/tty/ttyS0
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

● sys-devices-virtual-block-dm\x2d0.device - /sys/devices/virtual/block/dm-0
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:46:31 UTC; 54min ago
      Until: Fri 2026-01-23 09:46:31 UTC; 54min ago
     Device: /sys/devices/virtual/block/dm-0

● sys-devices-virtual-block-loop3.device - /sys/devices/virtual/block/loop3
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:46:31 UTC; 54min ago
      Until: Fri 2026-01-23 09:46:31 UTC; 54min ago
     Device: /sys/devices/virtual/block/loop3

● sys-devices-virtual-misc-rfkill.device - /sys/devices/virtual/misc/rfkill
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:24 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:24 UTC; 1h 40min ago
     Device: /sys/devices/virtual/misc/rfkill

● sys-devices-virtual-net-br\x2dex.device - /sys/devices/virtual/net/br-ex
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:43:14 UTC; 58min ago
      Until: Fri 2026-01-23 09:43:14 UTC; 58min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-devices-virtual-net-br\x2dint.device - /sys/devices/virtual/net/br-int
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 10:01:35 UTC; 39min ago
      Until: Fri 2026-01-23 10:01:35 UTC; 39min ago
     Device: /sys/devices/virtual/net/br-int

● sys-devices-virtual-net-genev_sys_6081.device - /sys/devices/virtual/net/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 10:01:35 UTC; 39min ago
      Until: Fri 2026-01-23 10:01:35 UTC; 39min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-devices-virtual-net-ovs\x2dsystem.device - /sys/devices/virtual/net/ovs-system
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:43:14 UTC; 58min ago
      Until: Fri 2026-01-23 09:43:14 UTC; 58min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-devices-virtual-net-vlan20.device - /sys/devices/virtual/net/vlan20
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:43:14 UTC; 58min ago
      Until: Fri 2026-01-23 09:43:14 UTC; 58min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-devices-virtual-net-vlan21.device - /sys/devices/virtual/net/vlan21
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:43:14 UTC; 58min ago
      Until: Fri 2026-01-23 09:43:14 UTC; 58min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-devices-virtual-net-vlan22.device - /sys/devices/virtual/net/vlan22
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:43:14 UTC; 58min ago
      Until: Fri 2026-01-23 09:43:14 UTC; 58min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-devices-virtual-net-vlan23.device - /sys/devices/virtual/net/vlan23
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:43:14 UTC; 58min ago
      Until: Fri 2026-01-23 09:43:14 UTC; 58min ago
     Device: /sys/devices/virtual/net/vlan23

● sys-module-configfs.device - /sys/module/configfs
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/module/configfs

● sys-module-fuse.device - /sys/module/fuse
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/module/fuse

● sys-subsystem-net-devices-br\x2dex.device - /sys/subsystem/net/devices/br-ex
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:43:14 UTC; 58min ago
      Until: Fri 2026-01-23 09:43:14 UTC; 58min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-subsystem-net-devices-br\x2dint.device - /sys/subsystem/net/devices/br-int
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 10:01:35 UTC; 39min ago
      Until: Fri 2026-01-23 10:01:35 UTC; 39min ago
     Device: /sys/devices/virtual/net/br-int

● sys-subsystem-net-devices-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-subsystem-net-devices-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:03:30 UTC; 1h 37min ago
      Until: Fri 2026-01-23 09:03:30 UTC; 1h 37min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-subsystem-net-devices-genev_sys_6081.device - /sys/subsystem/net/devices/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 10:01:35 UTC; 39min ago
      Until: Fri 2026-01-23 10:01:35 UTC; 39min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-subsystem-net-devices-ovs\x2dsystem.device - /sys/subsystem/net/devices/ovs-system
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:43:14 UTC; 58min ago
      Until: Fri 2026-01-23 09:43:14 UTC; 58min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-subsystem-net-devices-vlan20.device - /sys/subsystem/net/devices/vlan20
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:43:14 UTC; 58min ago
      Until: Fri 2026-01-23 09:43:14 UTC; 58min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-subsystem-net-devices-vlan21.device - /sys/subsystem/net/devices/vlan21
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:43:14 UTC; 58min ago
      Until: Fri 2026-01-23 09:43:14 UTC; 58min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-subsystem-net-devices-vlan22.device - /sys/subsystem/net/devices/vlan22
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:43:14 UTC; 58min ago
      Until: Fri 2026-01-23 09:43:14 UTC; 58min ago
     Device: /sys/devices/virtual/net/vlan22

Unit boot.mount could not be found.
Unit home.mount could not be found.
● sys-subsystem-net-devices-vlan23.device - /sys/subsystem/net/devices/vlan23
     Loaded: loaded
     Active: active (plugged) since Fri 2026-01-23 09:43:14 UTC; 58min ago
      Until: Fri 2026-01-23 09:43:14 UTC; 58min ago
     Device: /sys/devices/virtual/net/vlan23

● -.mount - Root Mount
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted)
      Where: /
       What: /dev/vda1
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages.mount - Huge Pages File System
     Loaded: loaded (/usr/lib/systemd/system/dev-hugepages.mount; static)
     Active: active (mounted) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Where: /dev/hugepages
       What: hugetlbfs
       Docs: https://docs.kernel.org/admin-guide/mm/hugetlbpage.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 20.0K (peak: 324.0K)
        CPU: 11ms
     CGroup: /dev-hugepages.mount

● dev-hugepages1G.mount - /dev/hugepages1G
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Fri 2026-01-23 09:45:30 UTC; 55min ago
      Until: Fri 2026-01-23 09:45:30 UTC; 55min ago
      Where: /dev/hugepages1G
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages2M.mount - /dev/hugepages2M
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Fri 2026-01-23 09:45:31 UTC; 55min ago
      Until: Fri 2026-01-23 09:45:31 UTC; 55min ago
      Where: /dev/hugepages2M
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-mqueue.mount - POSIX Message Queue File System
     Loaded: loaded (/usr/lib/systemd/system/dev-mqueue.mount; static)
     Active: active (mounted) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Where: /dev/mqueue
       What: mqueue
       Docs: man:mq_overview(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 36.0K (peak: 536.0K)
        CPU: 3ms
     CGroup: /dev-mqueue.mount

Jan 23 09:01:23 localhost systemd[1]: Mounted POSIX Message Queue File System.

○ proc-fs-nfsd.mount - NFSD configuration filesystem
     Loaded: loaded (/usr/lib/systemd/system/proc-fs-nfsd.mount; static)
     Active: inactive (dead)
      Where: /proc/fs/nfsd
       What: nfsd

● proc-sys-fs-binfmt_misc.mount - Arbitrary Executable File Formats File System
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.mount; disabled; preset: disabled)
     Active: active (mounted) since Fri 2026-01-23 09:48:27 UTC; 52min ago
      Until: Fri 2026-01-23 09:48:27 UTC; 52min ago
TriggeredBy: ● proc-sys-fs-binfmt_misc.automount
      Where: /proc/sys/fs/binfmt_misc
       What: binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 8.0K (peak: 548.0K)
        CPU: 9ms
     CGroup: /proc-sys-fs-binfmt_misc.mount

Jan 23 09:48:27 compute-0 systemd[1]: Mounting Arbitrary Executable File Formats File System...
Jan 23 09:48:27 compute-0 systemd[1]: Mounted Arbitrary Executable File Formats File System.

● run-credentials-systemd\x2dsysctl.service.mount - /run/credentials/systemd-sysctl.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 09:41:39 UTC; 59min ago
      Until: Fri 2026-01-23 09:41:39 UTC; 59min ago
      Where: /run/credentials/systemd-sysctl.service
       What: none

● run-credentials-systemd\x2dsysusers.service.mount - /run/credentials/systemd-sysusers.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Where: /run/credentials/systemd-sysusers.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup.service.mount - /run/credentials/systemd-tmpfiles-setup.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 09:01:24 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:24 UTC; 1h 40min ago
      Where: /run/credentials/systemd-tmpfiles-setup.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup\x2ddev.service.mount - /run/credentials/systemd-tmpfiles-setup-dev.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Where: /run/credentials/systemd-tmpfiles-setup-dev.service
       What: none

● run-netns.mount - /run/netns
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 09:44:55 UTC; 56min ago
      Until: Fri 2026-01-23 09:44:55 UTC; 56min ago
      Where: /run/netns
       What: tmpfs

● run-user-1000.mount - /run/user/1000
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 09:01:44 UTC; 1h 39min ago
      Until: Fri 2026-01-23 09:01:44 UTC; 1h 39min ago
      Where: /run/user/1000
       What: tmpfs

● run-user-42477.mount - /run/user/42477
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 09:51:33 UTC; 49min ago
      Until: Fri 2026-01-23 09:51:33 UTC; 49min ago
      Where: /run/user/42477
       What: tmpfs

● sys-fs-fuse-connections.mount - FUSE Control File System
     Loaded: loaded (/usr/lib/systemd/system/sys-fs-fuse-connections.mount; static)
     Active: active (mounted) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Where: /sys/fs/fuse/connections
       What: fusectl
       Docs: https://docs.kernel.org/filesystems/fuse.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 3ms
     CGroup: /sys-fs-fuse-connections.mount

Jan 23 09:01:23 localhost systemd[1]: Mounting FUSE Control File System...
Jan 23 09:01:23 localhost systemd[1]: Mounted FUSE Control File System.

● sys-kernel-config.mount - Kernel Configuration File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
      Where: /sys/kernel/config
       What: configfs
       Docs: https://docs.kernel.org/filesystems/configfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● sys-kernel-debug-tracing.mount - /sys/kernel/debug/tracing
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 10:30:05 UTC; 11min ago
      Until: Fri 2026-01-23 10:30:05 UTC; 11min ago
      Where: /sys/kernel/debug/tracing
       What: tracefs

● sys-kernel-debug.mount - Kernel Debug File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-debug.mount; static)
     Active: active (mounted) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Where: /sys/kernel/debug
       What: debugfs
       Docs: https://docs.kernel.org/filesystems/debugfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 5ms
     CGroup: /sys-kernel-debug.mount

Jan 23 09:01:23 localhost systemd[1]: Mounted Kernel Debug File System.

● sys-kernel-tracing.mount - Kernel Trace File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-tracing.mount; static)
     Active: active (mounted) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
Unit sysroot.mount could not be found.
       Where: /sys/kernel/tracing
       What: tracefs
       Docs: https://docs.kernel.org/trace/ftrace.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 2ms
     CGroup: /sys-kernel-tracing.mount

Jan 23 09:01:23 localhost systemd[1]: Mounted Kernel Trace File System.

○ tmp.mount - Temporary Directory /tmp
     Loaded: loaded (/usr/lib/systemd/system/tmp.mount; disabled; preset: disabled)
     Active: inactive (dead)
      Where: /tmp
       What: tmpfs
       Docs: https://systemd.io/TEMPORARY_DIRECTORIES
             man:file-hierarchy(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● var-lib-containers-storage-overlay-01e632e93d849c0bb7e7946a5321ac39dd650986526d3ee8477ae82295cdb153-merged.mount - /var/lib/containers/storage/overlay/01e632e93d849c0bb7e7946a5321ac39dd650986526d3ee8477ae82295cdb153/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 09:51:16 UTC; 50min ago
      Until: Fri 2026-01-23 09:51:16 UTC; 50min ago
      Where: /var/lib/containers/storage/overlay/01e632e93d849c0bb7e7946a5321ac39dd650986526d3ee8477ae82295cdb153/merged
       What: overlay

● var-lib-containers-storage-overlay-1d5ce19beb3067af916dd9233a0d589cc81624dc4a3b24baf129b085879854bc-merged.mount - /var/lib/containers/storage/overlay/1d5ce19beb3067af916dd9233a0d589cc81624dc4a3b24baf129b085879854bc/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 09:47:42 UTC; 53min ago
      Until: Fri 2026-01-23 09:47:42 UTC; 53min ago
      Where: /var/lib/containers/storage/overlay/1d5ce19beb3067af916dd9233a0d589cc81624dc4a3b24baf129b085879854bc/merged
       What: overlay

● var-lib-containers-storage-overlay-1f796cc95f534e6a84bd0a7e0228828397ca5e9569b95c8d09d6527edf64deca-merged.mount - /var/lib/containers/storage/overlay/1f796cc95f534e6a84bd0a7e0228828397ca5e9569b95c8d09d6527edf64deca/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 09:53:25 UTC; 48min ago
      Until: Fri 2026-01-23 09:53:25 UTC; 48min ago
      Where: /var/lib/containers/storage/overlay/1f796cc95f534e6a84bd0a7e0228828397ca5e9569b95c8d09d6527edf64deca/merged
       What: overlay

● var-lib-containers-storage-overlay-242a63e32ddb7bea9e5703add0577d3c52d4285ce1ab2ed72eb9293ba57f4e99-merged.mount - /var/lib/containers/storage/overlay/242a63e32ddb7bea9e5703add0577d3c52d4285ce1ab2ed72eb9293ba57f4e99/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 10:02:56 UTC; 38min ago
      Until: Fri 2026-01-23 10:02:56 UTC; 38min ago
      Where: /var/lib/containers/storage/overlay/242a63e32ddb7bea9e5703add0577d3c52d4285ce1ab2ed72eb9293ba57f4e99/merged
       What: overlay

● var-lib-containers-storage-overlay-2500fc458994a630ad6b62e3e8adcc094e3bd5300632239ebceb55ebf49962e3-merged.mount - /var/lib/containers/storage/overlay/2500fc458994a630ad6b62e3e8adcc094e3bd5300632239ebceb55ebf49962e3/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 09:47:45 UTC; 53min ago
      Until: Fri 2026-01-23 09:47:45 UTC; 53min ago
      Where: /var/lib/containers/storage/overlay/2500fc458994a630ad6b62e3e8adcc094e3bd5300632239ebceb55ebf49962e3/merged
       What: overlay

● var-lib-containers-storage-overlay-2790d82b840c49fa2382f0302c8c48615986546c1a8b085b001f0948f4ffe473-merged.mount - /var/lib/containers/storage/overlay/2790d82b840c49fa2382f0302c8c48615986546c1a8b085b001f0948f4ffe473/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 09:52:33 UTC; 48min ago
      Until: Fri 2026-01-23 09:52:33 UTC; 48min ago
      Where: /var/lib/containers/storage/overlay/2790d82b840c49fa2382f0302c8c48615986546c1a8b085b001f0948f4ffe473/merged
       What: overlay

● var-lib-containers-storage-overlay-42ef0523dc11947d0337c8489f20321b791f8fc891e68acd6e98eafaa7caa0d4-merged.mount - /var/lib/containers/storage/overlay/42ef0523dc11947d0337c8489f20321b791f8fc891e68acd6e98eafaa7caa0d4/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 10:01:33 UTC; 39min ago
      Until: Fri 2026-01-23 10:01:33 UTC; 39min ago
      Where: /var/lib/containers/storage/overlay/42ef0523dc11947d0337c8489f20321b791f8fc891e68acd6e98eafaa7caa0d4/merged
       What: overlay

● var-lib-containers-storage-overlay-4f6c22edc10962e45899f80643a75fd409e4432e6192b478a2ee6ad5d9f42119-merged.mount - /var/lib/containers/storage/overlay/4f6c22edc10962e45899f80643a75fd409e4432e6192b478a2ee6ad5d9f42119/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 09:54:46 UTC; 46min ago
      Until: Fri 2026-01-23 09:54:46 UTC; 46min ago
      Where: /var/lib/containers/storage/overlay/4f6c22edc10962e45899f80643a75fd409e4432e6192b478a2ee6ad5d9f42119/merged
       What: overlay

● var-lib-containers-storage-overlay-59938237d20c0ad1457de530a92e033783b0d28b99675d438d51d81eaf3fd06c-merged.mount - /var/lib/containers/storage/overlay/59938237d20c0ad1457de530a92e033783b0d28b99675d438d51d81eaf3fd06c/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 09:54:01 UTC; 47min ago
      Until: Fri 2026-01-23 09:54:01 UTC; 47min ago
      Where: /var/lib/containers/storage/overlay/59938237d20c0ad1457de530a92e033783b0d28b99675d438d51d81eaf3fd06c/merged
       What: overlay

● var-lib-containers-storage-overlay-6976f01a9e7e4f3455082effd4886627e2d584498d9eada66fd9906f9d72300c-merged.mount - /var/lib/containers/storage/overlay/6976f01a9e7e4f3455082effd4886627e2d584498d9eada66fd9906f9d72300c/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 09:52:22 UTC; 49min ago
      Until: Fri 2026-01-23 09:52:22 UTC; 49min ago
      Where: /var/lib/containers/storage/overlay/6976f01a9e7e4f3455082effd4886627e2d584498d9eada66fd9906f9d72300c/merged
       What: overlay

● var-lib-containers-storage-overlay-7533e253a2f2eaf0e94372f654b1b0e8b480b8ea04759637cb571ea93f127010-merged.mount - /var/lib/containers/storage/overlay/7533e253a2f2eaf0e94372f654b1b0e8b480b8ea04759637cb571ea93f127010/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 10:10:54 UTC; 30min ago
      Until: Fri 2026-01-23 10:10:54 UTC; 30min ago
      Where: /var/lib/containers/storage/overlay/7533e253a2f2eaf0e94372f654b1b0e8b480b8ea04759637cb571ea93f127010/merged
       What: overlay

● var-lib-containers-storage-overlay-915da6ae77c99293a7245b9b84b0c8f5a3e0d7b42a42aa6d7d829c17c3a6bb4e-merged.mount - /var/lib/containers/storage/overlay/915da6ae77c99293a7245b9b84b0c8f5a3e0d7b42a42aa6d7d829c17c3a6bb4e/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 09:53:08 UTC; 48min ago
      Until: Fri 2026-01-23 09:53:08 UTC; 48min ago
      Where: /var/lib/containers/storage/overlay/915da6ae77c99293a7245b9b84b0c8f5a3e0d7b42a42aa6d7d829c17c3a6bb4e/merged
       What: overlay

● var-lib-containers-storage-overlay-92a527e429e3632ae9bb170e93202ca707bcd9bfdbac51cf50f42aad0f8ac0b4-merged.mount - /var/lib/containers/storage/overlay/92a527e429e3632ae9bb170e93202ca707bcd9bfdbac51cf50f42aad0f8ac0b4/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 09:48:36 UTC; 52min ago
      Until: Fri 2026-01-23 09:48:36 UTC; 52min ago
      Where: /var/lib/containers/storage/overlay/92a527e429e3632ae9bb170e93202ca707bcd9bfdbac51cf50f42aad0f8ac0b4/merged
       What: overlay

● var-lib-containers-storage-overlay-ceecdec01e4df8cb55ab55bc2d40efca340d57ab353df5123ef66bdd22eecc4a-merged.mount - /var/lib/containers/storage/overlay/ceecdec01e4df8cb55ab55bc2d40efca340d57ab353df5123ef66bdd22eecc4a/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 09:53:57 UTC; 47min ago
      Until: Fri 2026-01-23 09:53:57 UTC; 47min ago
      Where: /var/lib/containers/storage/overlay/ceecdec01e4df8cb55ab55bc2d40efca340d57ab353df5123ef66bdd22eecc4a/merged
       What: overlay

● var-lib-containers-storage-overlay-e74e45de9281b8b1321d7bd4f43611748e33c5862c905273bf511f9bc0c699ee-merged.mount - /var/lib/containers/storage/overlay/e74e45de9281b8b1321d7bd4f43611748e33c5862c905273bf511f9bc0c699ee/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 09:49:39 UTC; 51min ago
      Until: Fri 2026-01-23 09:49:39 UTC; 51min ago
      Where: /var/lib/containers/storage/overlay/e74e45de9281b8b1321d7bd4f43611748e33c5862c905273bf511f9bc0c699ee/merged
       What: overlay

● var-lib-containers-storage-overlay-ec44db0bbc9902d0b0dffdc31672a0a6a0ef4f1d924df37071d5099fb7667628-merged.mount - /var/lib/containers/storage/overlay/ec44db0bbc9902d0b0dffdc31672a0a6a0ef4f1d924df37071d5099fb7667628/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 09:54:10 UTC; 47min ago
      Until: Fri 2026-01-23 09:54:10 UTC; 47min ago
      Where: /var/lib/containers/storage/overlay/ec44db0bbc9902d0b0dffdc31672a0a6a0ef4f1d924df37071d5099fb7667628/merged
       What: overlay

● var-lib-containers-storage-overlay-fd7542535c4ceaeda21044a222e74565124785c29e9096a48b5ed65c4a43f2dc-merged.mount - /var/lib/containers/storage/overlay/fd7542535c4ceaeda21044a222e74565124785c29e9096a48b5ed65c4a43f2dc/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 09:54:48 UTC; 46min ago
      Until: Fri 2026-01-23 09:54:48 UTC; 46min ago
      Where: /var/lib/containers/storage/overlay/fd7542535c4ceaeda21044a222e74565124785c29e9096a48b5ed65c4a43f2dc/merged
       What: overlay

● var-lib-containers-storage-overlay.mount - /var/lib/containers/storage/overlay
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 09:47:41 UTC; 53min ago
      Until: Fri 2026-01-23 09:47:41 UTC; 53min ago
      Where: /var/lib/containers/storage/overlay
       What: /dev/vda1

● var-lib-containers-storage-overlay\x2dcontainers-7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 10:02:56 UTC; 38min ago
      Until: Fri 2026-01-23 10:02:56 UTC; 38min ago
      Where: /var/lib/containers/storage/overlay-containers/7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 10:10:54 UTC; 30min ago
      Until: Fri 2026-01-23 10:10:54 UTC; 30min ago
      Where: /var/lib/containers/storage/overlay-containers/955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Fri 2026-01-23 10:01:33 UTC; 39min ago
      Until: Fri 2026-01-23 10:01:33 UTC; 39min ago
      Where: /var/lib/containers/storage/overlay-containers/ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d/userdata/shm
       What: shm

○ var-lib-machines.mount - Virtual Machine and Container Storage (Compatibility)
     Loaded: loaded (/usr/lib/systemd/system/var-lib-machines.mount; static)
     Active: inactive (dead)
  Condition: start condition failed at Fri 2026-01-23 10:07:17 UTC; 34min ago
      Where: /var/lib/machines
       What: /var/lib/machines.raw

Jan 23 10:07:17 compute-0 systemd[1]: Virtual Machine and Container Storage (Compatibility) was skipped because of an unmet condition check (ConditionPathExists=/var/lib/machines.raw).

● var-lib-nfs-rpc_pipefs.mount - RPC Pipe File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
      Where: /var/lib/nfs/rpc_pipefs
       What: rpc_pipefs

● systemd-ask-password-console.path - Dispatch Password Requests to Console Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.path; static)
     Active: active (waiting) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
   Triggers: ● systemd-ask-password-console.service
       Docs: man:systemd-ask-password-console.path(8)

● systemd-ask-password-wall.path - Forward Password Requests to Wall Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.path; static)
     Active: active (waiting) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
   Triggers: ● systemd-ask-password-wall.service
       Docs: man:systemd-ask-password-wall.path(8)

● init.scope - System and Service Manager
     Loaded: loaded
  Transient: yes
     Active: active (running) since Fri 2026-01-23 09:01:19 UTC; 1h 40min ago
       Docs: man:systemd(1)
         IO: 764.0K read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 49.9M (peak: 68.3M)
        CPU: 1min 19.255s
     CGroup: /init.scope
             └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31

Jan 23 10:41:17 compute-0 systemd[1]: Started libcrun container.
Jan 23 10:41:17 compute-0 systemd[1]: libpod-08c67959b2d141ae7a57d37ece2da37ad07761fa59f0340f63f41eaed57d5855.scope: Deactivated successfully.
Jan 23 10:41:17 compute-0 systemd[1]: var-lib-containers-storage-overlay-2533e289941a47e639cd0db545742105d138d628a923353a5d3c86e91f47b4f0-merged.mount: Deactivated successfully.
Jan 23 10:41:17 compute-0 systemd[1]: libpod-conmon-08c67959b2d141ae7a57d37ece2da37ad07761fa59f0340f63f41eaed57d5855.scope: Deactivated successfully.
Jan 23 10:41:18 compute-0 systemd[1]: Started libpod-conmon-70935c21499685672b8e368e4d9f412081240a225439ed4442c8712ab1111167.scope.
Jan 23 10:41:18 compute-0 systemd[1]: Started libcrun container.
Jan 23 10:41:18 compute-0 systemd[1]: libpod-70935c21499685672b8e368e4d9f412081240a225439ed4442c8712ab1111167.scope: Deactivated successfully.
Jan 23 10:41:18 compute-0 systemd[1]: libpod-70935c21499685672b8e368e4d9f412081240a225439ed4442c8712ab1111167.scope: Consumed 1.129s CPU time.
Jan 23 10:41:19 compute-0 systemd[1]: var-lib-containers-storage-overlay-1da9cc6cfb41f2879c30509070e68c9ab54203f80ead794e613f67a7cf3a6d72-merged.mount: Deactivated successfully.
Jan 23 10:41:19 compute-0 systemd[1]: libpod-conmon-70935c21499685672b8e368e4d9f412081240a225439ed4442c8712ab1111167.scope: Deactivated successfully.

● libpod-7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d.scope.d
             └─dep.conf
     Active: active (running) since Fri 2026-01-23 10:02:57 UTC; 38min ago
         IO: 27.3M read, 2.6M written
      Tasks: 10 (limit: 4096)
     Memory: 439.5M (peak: 484.4M)
        CPU: 17.293s
     CGroup: /machine.slice/libpod-7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d.scope
             └─container
               ├─161918 dumb-init --single-child -- kolla_start
               ├─161921 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─162303 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─162436 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpm91q9wva/privsep.sock
               ├─255218 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmp5z6r0kvj/privsep.sock
               └─255276 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpv8hp88b7/privsep.sock

Jan 23 10:23:52 compute-0 podman[265828]: 2026-01-23 10:23:52.929206101 +0000 UTC m=+0.045291421 container died b32b674b9d53093bf4462ffd0b5c39e0a28039dbcd3f8d8e36d9c29dd751ca07 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:85729a662800e6b42ceb088545fed39a2ac58704b4a37fd540cdef3ebf9e59a2, name=neutron-haproxy-ovnmeta-2fb57e44-e877-47c8-860b-b36d5b5ff599, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251202, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, org.label-schema.license=GPLv2, tcib_managed=true)
Jan 23 10:23:52 compute-0 podman[265828]: 2026-01-23 10:23:52.970557604 +0000 UTC m=+0.086642914 container cleanup b32b674b9d53093bf4462ffd0b5c39e0a28039dbcd3f8d8e36d9c29dd751ca07 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:85729a662800e6b42ceb088545fed39a2ac58704b4a37fd540cdef3ebf9e59a2, name=neutron-haproxy-ovnmeta-2fb57e44-e877-47c8-860b-b36d5b5ff599, org.label-schema.license=GPLv2, tcib_managed=true, io.buildah.version=1.41.3, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251202)
Jan 23 10:23:53 compute-0 podman[265887]: 2026-01-23 10:23:53.036672705 +0000 UTC m=+0.042918431 container remove b32b674b9d53093bf4462ffd0b5c39e0a28039dbcd3f8d8e36d9c29dd751ca07 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:85729a662800e6b42ceb088545fed39a2ac58704b4a37fd540cdef3ebf9e59a2, name=neutron-haproxy-ovnmeta-2fb57e44-e877-47c8-860b-b36d5b5ff599, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251202, org.label-schema.license=GPLv2, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, io.buildah.version=1.41.3, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Jan 23 10:28:05 compute-0 podman[270667]: 2026-01-23 10:28:05.870024036 +0000 UTC m=+0.050471908 container create eedf225d072117fa873404c5c3ca917eaad295469b916f86d736409371dda85e (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:85729a662800e6b42ceb088545fed39a2ac58704b4a37fd540cdef3ebf9e59a2, name=neutron-haproxy-ovnmeta-64d8458c-fab0-469a-aa4f-0a8a3ecc755f, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.build-date=20251202, tcib_managed=true)
Jan 23 10:28:05 compute-0 podman[270667]: 2026-01-23 10:28:05.845458827 +0000 UTC m=+0.025906729 image pull 3695f0466b4af47afdf4b467956f8cc4744d7249671a73e7ca3fd26cca2f59c3 quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:85729a662800e6b42ceb088545fed39a2ac58704b4a37fd540cdef3ebf9e59a2
Jan 23 10:28:05 compute-0 podman[270667]: 2026-01-23 10:28:05.943275242 +0000 UTC m=+0.123723134 container init eedf225d072117fa873404c5c3ca917eaad295469b916f86d736409371dda85e (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:85729a662800e6b42ceb088545fed39a2ac58704b4a37fd540cdef3ebf9e59a2, name=neutron-haproxy-ovnmeta-64d8458c-fab0-469a-aa4f-0a8a3ecc755f, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20251202, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image)
Jan 23 10:28:05 compute-0 podman[270667]: 2026-01-23 10:28:05.950866991 +0000 UTC m=+0.131314863 container start eedf225d072117fa873404c5c3ca917eaad295469b916f86d736409371dda85e (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:85729a662800e6b42ceb088545fed39a2ac58704b4a37fd540cdef3ebf9e59a2, name=neutron-haproxy-ovnmeta-64d8458c-fab0-469a-aa4f-0a8a3ecc755f, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, io.buildah.version=1.41.3, org.label-schema.build-date=20251202)
Jan 23 10:28:41 compute-0 podman[271652]: 2026-01-23 10:28:41.0753391 +0000 UTC m=+0.045323684 container died eedf225d072117fa873404c5c3ca917eaad295469b916f86d736409371dda85e (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:85729a662800e6b42ceb088545fed39a2ac58704b4a37fd540cdef3ebf9e59a2, name=neutron-haproxy-ovnmeta-64d8458c-fab0-469a-aa4f-0a8a3ecc755f, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251202, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, org.label-schema.license=GPLv2, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS)
Jan 23 10:28:41 compute-0 podman[271652]: 2026-01-23 10:28:41.117551715 +0000 UTC m=+0.087536289 container cleanup eedf225d072117fa873404c5c3ca917eaad295469b916f86d736409371dda85e (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:85729a662800e6b42ceb088545fed39a2ac58704b4a37fd540cdef3ebf9e59a2, name=neutron-haproxy-ovnmeta-64d8458c-fab0-469a-aa4f-0a8a3ecc755f, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251202)
Jan 23 10:28:41 compute-0 podman[271689]: 2026-01-23 10:28:41.178669019 +0000 UTC m=+0.039650822 container remove eedf225d072117fa873404c5c3ca917eaad295469b916f86d736409371dda85e (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:85729a662800e6b42ceb088545fed39a2ac58704b4a37fd540cdef3ebf9e59a2, name=neutron-haproxy-ovnmeta-64d8458c-fab0-469a-aa4f-0a8a3ecc755f, org.label-schema.schema-version=1.0, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.build-date=20251202, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS)

● libpod-955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f.scope; transient)
  Transient: yes
     Active: active (running) since Fri 2026-01-23 10:10:54 UTC; 30min ago
         IO: 40.7M read, 41.7M written
      Tasks: 27 (limit: 4096)
     Memory: 378.7M (peak: 457.2M)
        CPU: 58.420s
     CGroup: /machine.slice/libpod-955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f.scope
             └─container
               ├─249231 dumb-init --single-child -- kolla_start
               ├─249233 /usr/bin/python3 /usr/bin/nova-compute
               ├─255097 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmp_1x_jasi/privsep.sock
               └─255486 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmpsh92gw98/privsep.sock

Jan 23 10:10:54 compute-0 systemd[1]: Started libcrun container.

● libpod-ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d.scope.d
             └─dep.conf
     Active: active (running) since Fri 2026-01-23 10:01:34 UTC; 39min ago
         IO: 1008.0K read, 4.0K written
      Tasks: 6 (limit: 4096)
     Memory: 10.5M (peak: 13.9M)
        CPU: 4.309s
     CGroup: /machine.slice/libpod-ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d.scope
             └─container
               ├─151636 dumb-init --single-child -- kolla_start
               └─151639 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt

Jan 23 10:01:34 compute-0 systemd[1]: Started libcrun container.

● session-1.scope - Session 1 of User zuul
     Loaded: loaded (/run/systemd/transient/session-1.scope; transient)
  Transient: yes
     Active: active (abandoned) since Fri 2026-01-23 09:01:44 UTC; 1h 39min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 17.1M (peak: 38.9M)
        CPU: 1min 7.439s
     CGroup: /user.slice/user-1000.slice/session-1.scope
             └─4516 /usr/bin/python3

Jan 23 09:03:42 np0005593293.novalocal python3[7129]: ansible-ansible.legacy.copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1769159021.3305278-104-94129406854838/source dest=/etc/NetworkManager/system-connections/ci-private-network.nmconnection mode=0600 owner=root group=root follow=False _original_basename=bootstrap-ci-network-nm-connection.nmconnection.j2 checksum=2eff1c515c815bdb72f89804dc252cf6b4af17ef backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Jan 23 09:03:42 np0005593293.novalocal sudo[7127]: pam_unix(sudo:session): session closed for user root
Jan 23 09:03:42 np0005593293.novalocal sudo[7177]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bpybivpwxddowqekhjbufukghrxhghei ; OS_CLOUD=vexxhost /usr/bin/python3'
Jan 23 09:03:42 np0005593293.novalocal sudo[7177]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Jan 23 09:03:42 np0005593293.novalocal python3[7179]: ansible-ansible.builtin.systemd Invoked with name=NetworkManager state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Jan 23 09:03:43 np0005593293.novalocal sudo[7177]: pam_unix(sudo:session): session closed for user root
Jan 23 09:03:43 np0005593293.novalocal python3[7263]: ansible-ansible.legacy.command Invoked with _raw_params=ip route zuul_log_id=fa163ef9-e89a-4543-3693-0000000000bd-0-controller zuul_ansible_split_streams=False _uses_shell=False warn=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Jan 23 09:04:43 np0005593293.novalocal sshd-session[4313]: Received disconnect from 38.102.83.114 port 38800:11: disconnected by user
Jan 23 09:04:43 np0005593293.novalocal sshd-session[4313]: Disconnected from user zuul 38.102.83.114 port 38800
Jan 23 09:04:43 np0005593293.novalocal sshd-session[4300]: pam_unix(sshd:session): session closed for user zuul

● session-39.scope - Session 39 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-39.scope; transient)
  Transient: yes
     Active: active (running) since Fri 2026-01-23 09:54:20 UTC; 47min ago
         IO: 764.0K read, 149.4M written
      Tasks: 2
     Memory: 4.8M (peak: 73.8M)
        CPU: 3min 24.345s
     CGroup: /user.slice/user-42477.slice/session-39.scope
             ├─99842 "sshd-session: ceph-admin [priv]"
             └─99848 "sshd-session: ceph-admin@notty"

Jan 23 10:41:18 compute-0 podman[295489]: 2026-01-23 10:41:18.037797664 +0000 UTC m=+0.023774503 image pull aade1b12b8e6196a39b8c83a7f707419487931732368729477a8c2bbcbca1d7c quay.io/ceph/ceph@sha256:7c69e59beaeea61ca714e71cb84ff6d5e533db7f1fd84143dd9ba6649a5fd2ec
Jan 23 10:41:18 compute-0 podman[295489]: 2026-01-23 10:41:18.161610781 +0000 UTC m=+0.147587640 container init 70935c21499685672b8e368e4d9f412081240a225439ed4442c8712ab1111167 (image=quay.io/ceph/ceph@sha256:7c69e59beaeea61ca714e71cb84ff6d5e533db7f1fd84143dd9ba6649a5fd2ec, name=compassionate_solomon, io.buildah.version=1.40.1, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=c92aebb279828e9c3c1f5d24613efca272649e62, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250325, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, ceph=True, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/)
Jan 23 10:41:18 compute-0 podman[295489]: 2026-01-23 10:41:18.172169193 +0000 UTC m=+0.158146012 container start 70935c21499685672b8e368e4d9f412081240a225439ed4442c8712ab1111167 (image=quay.io/ceph/ceph@sha256:7c69e59beaeea61ca714e71cb84ff6d5e533db7f1fd84143dd9ba6649a5fd2ec, name=compassionate_solomon, org.label-schema.vendor=CentOS, CEPH_SHA1=c92aebb279828e9c3c1f5d24613efca272649e62, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=squid, org.label-schema.schema-version=1.0, io.buildah.version=1.40.1, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250325, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Jan 23 10:41:18 compute-0 podman[295489]: 2026-01-23 10:41:18.175831238 +0000 UTC m=+0.161808087 container attach 70935c21499685672b8e368e4d9f412081240a225439ed4442c8712ab1111167 (image=quay.io/ceph/ceph@sha256:7c69e59beaeea61ca714e71cb84ff6d5e533db7f1fd84143dd9ba6649a5fd2ec, name=compassionate_solomon, io.buildah.version=1.40.1, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=c92aebb279828e9c3c1f5d24613efca272649e62, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_REF=squid, org.label-schema.build-date=20250325, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Jan 23 10:41:18 compute-0 podman[295489]: 2026-01-23 10:41:18.894338682 +0000 UTC m=+0.880315501 container died 70935c21499685672b8e368e4d9f412081240a225439ed4442c8712ab1111167 (image=quay.io/ceph/ceph@sha256:7c69e59beaeea61ca714e71cb84ff6d5e533db7f1fd84143dd9ba6649a5fd2ec, name=compassionate_solomon, ceph=True, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20250325, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.40.1, CEPH_SHA1=c92aebb279828e9c3c1f5d24613efca272649e62, org.label-schema.license=GPLv2)
Jan 23 10:41:19 compute-0 podman[295489]: 2026-01-23 10:41:19.24226143 +0000 UTC m=+1.228238259 container remove 70935c21499685672b8e368e4d9f412081240a225439ed4442c8712ab1111167 (image=quay.io/ceph/ceph@sha256:7c69e59beaeea61ca714e71cb84ff6d5e533db7f1fd84143dd9ba6649a5fd2ec, name=compassionate_solomon, CEPH_SHA1=c92aebb279828e9c3c1f5d24613efca272649e62, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, OSD_FLAVOR=default, io.buildah.version=1.40.1, org.label-schema.build-date=20250325, org.label-schema.license=GPLv2, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Jan 23 10:41:19 compute-0 sudo[295083]: pam_unix(sudo:session): session closed for user root
Jan 23 10:41:20 compute-0 sudo[296342]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Jan 23 10:41:20 compute-0 sudo[296342]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 23 10:41:20 compute-0 sudo[296342]: pam_unix(sudo:session): session closed for user root

● session-59.scope - Session 59 of User zuul
     Loaded: loaded (/run/systemd/transient/session-59.scope; transient)
  Transient: yes
     Active: active (running) since Fri 2026-01-23 10:40:43 UTC; 42s ago
         IO: 18.3M read, 80.7M written
      Tasks: 31
     Memory: 464.8M (peak: 515.1M)
        CPU: 2min 997ms
     CGroup: /user.slice/user-1000.slice/session-59.scope
             ├─289862 "sshd-session: zuul [priv]"
             ├─289865 "sshd-session: zuul@notty"
             ├─289866 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             ├─289890 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             ├─296659 timeout 15s turbostat --debug sleep 10
             ├─297031 timeout 300s systemctl status --all
             ├─297032 systemctl status --all
             ├─297035 timeout 300s ceph osd dump --format json-pretty
             ├─297036 /usr/bin/python3 -s /usr/bin/ceph osd dump --format json-pretty
             ├─297056 timeout 300s semanage module -l
             └─297057 /usr/bin/python3 -EsI /usr/sbin/semanage module -l

Jan 23 10:40:43 compute-0 systemd[1]: Started Session 59 of User zuul.
Jan 23 10:40:43 compute-0 sudo[289866]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Jan 23 10:40:43 compute-0 sudo[289866]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Jan 23 10:40:50 compute-0 ovs-vsctl[290219]: ovs|00001|db_ctl_base|ERR|no key "dpdk-init" in Open_vSwitch record "." column other_config
Unit apparmor.service could not be found.
Unit apt-daily.service could not be found.
Jan 23 10:41:17 compute-0 ovs-appctl[295110]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Jan 23 10:41:17 compute-0 ovs-appctl[295126]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Jan 23 10:41:17 compute-0 ovs-appctl[295133]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

○ 7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d-11d4d8d4c83d38a3.service - /usr/bin/podman healthcheck run 7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d
     Loaded: loaded (/run/systemd/transient/7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d-11d4d8d4c83d38a3.service; transient)
  Transient: yes
     Active: inactive (dead) since Fri 2026-01-23 10:40:56 UTC; 29s ago
   Duration: 79ms
TriggeredBy: ● 7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d-11d4d8d4c83d38a3.timer
    Process: 291103 ExecStart=/usr/bin/podman healthcheck run 7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d (code=exited, status=0/SUCCESS)
   Main PID: 291103 (code=exited, status=0/SUCCESS)
        CPU: 86ms

Jan 23 10:40:56 compute-0 podman[291103]: 2026-01-23 10:40:56.528181084 +0000 UTC m=+0.056057587 container health_status 7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:85729a662800e6b42ceb088545fed39a2ac58704b4a37fd540cdef3ebf9e59a2, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': 'cdc8d10f0e05d8a70b43cf26938a886cf76be4340fa6a898edc4cc90e10001b1-99269140098de15b48680c41e5313433c184a4380a28a4d66e6de0ece8f46703-99269140098de15b48680c41e5313433c184a4380a28a4d66e6de0ece8f46703-99269140098de15b48680c41e5313433c184a4380a28a4d66e6de0ece8f46703-0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:85729a662800e6b42ceb088545fed39a2ac58704b4a37fd540cdef3ebf9e59a2', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/openstack/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', 
'/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, tcib_managed=true, org.label-schema.build-date=20251202)

● auditd.service - Security Auditing Service
     Loaded: loaded (/usr/lib/systemd/system/auditd.service; enabled; preset: enabled)
     Active: active (running) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:auditd(8)
             https://github.com/linux-audit/audit-documentation
Unit auto-cpufreq.service could not be found.
Unit autofs.service could not be found.
   Main PID: 699 (auditd)
         IO: 0B read, 24.2M written
      Tasks: 4 (limit: 48560)
     Memory: 16.0M (peak: 16.5M)
        CPU: 5.909s
     CGroup: /system.slice/auditd.service
             ├─699 /sbin/auditd
             └─701 /usr/sbin/sedispatch

Jan 23 09:01:23 localhost augenrules[719]: failure 1
Jan 23 09:01:23 localhost augenrules[719]: pid 699
Jan 23 09:01:23 localhost augenrules[719]: rate_limit 0
Jan 23 09:01:23 localhost augenrules[719]: backlog_limit 8192
Jan 23 09:01:23 localhost augenrules[719]: lost 0
Jan 23 09:01:23 localhost augenrules[719]: backlog 2
Jan 23 09:01:23 localhost augenrules[719]: backlog_wait_time 60000
Jan 23 09:01:23 localhost augenrules[719]: backlog_wait_time_actual 0
Jan 23 09:01:23 localhost systemd[1]: Started Security Auditing Service.
Jan 23 10:06:21 compute-0 auditd[699]: Audit daemon rotating log files

○ auth-rpcgss-module.service - Kernel Module supporting RPCSEC_GSS
     Loaded: loaded (/usr/lib/systemd/system/auth-rpcgss-module.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Fri 2026-01-23 09:01:22 UTC; 1h 40min ago

○ blk-availability.service - Availability of block devices
     Loaded: loaded (/usr/lib/systemd/system/blk-availability.service; disabled; preset: disabled)
     Active: inactive (dead)

● ceph-f3005f84-239a-55b6-a948-8f1fb592b920@alertmanager.compute-0.service - Ceph alertmanager.compute-0 for f3005f84-239a-55b6-a948-8f1fb592b920
     Loaded: loaded (/etc/systemd/system/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@.service; enabled; preset: disabled)
     Active: active (running) since Fri 2026-01-23 09:54:46 UTC; 46min ago
   Main PID: 104185 (conmon)
         IO: 2.5M read, 257.5K written
      Tasks: 15 (limit: 48560)
     Memory: 30.8M (peak: 43.4M)
        CPU: 7.941s
     CGroup: /system.slice/system-ceph\x2df3005f84\x2d239a\x2d55b6\x2da948\x2d8f1fb592b920.slice/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@alertmanager.compute-0.service
             ├─libpod-payload-a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982
             │ ├─104187 /run/podman-init -- /bin/alertmanager --cluster.listen-address=:9094 --web.listen-address=192.168.122.100:9093 --config.file=/etc/alertmanager/alertmanager.yml
             │ └─104189 /bin/alertmanager --cluster.listen-address=:9094 --web.listen-address=192.168.122.100:9093 --config.file=/etc/alertmanager/alertmanager.yml
             └─runtime
               └─104185 /usr/bin/conmon --api-version 1 -c a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982 -u a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982/userdata -p /run/containers/storage/overlay-containers/a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-alertmanager-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@alertmanager.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982

Jan 23 10:41:03 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-alertmanager-compute-0[104185]: ts=2026-01-23T10:41:03.809Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[2]: notify retry canceled after 2 attempts: Post \"http://compute-2.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded; ceph-dashboard/webhook[1]: notify retry canceled after 2 attempts: Post \"http://compute-1.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded"
Jan 23 10:41:07 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-alertmanager-compute-0[104185]: ts=2026-01-23T10:41:07.920Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[2]: notify retry canceled after 2 attempts: Post \"http://compute-2.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded; ceph-dashboard/webhook[1]: notify retry canceled after 2 attempts: Post \"http://compute-1.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded"
Jan 23 10:41:09 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-alertmanager-compute-0[104185]: ts=2026-01-23T10:41:09.000Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[2]: notify retry canceled after 2 attempts: Post \"http://compute-2.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded; ceph-dashboard/webhook[1]: notify retry canceled after 2 attempts: Post \"http://compute-1.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded"
Jan 23 10:41:13 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-alertmanager-compute-0[104185]: ts=2026-01-23T10:41:13.810Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[2]: notify retry canceled after 2 attempts: Post \"http://compute-2.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded; ceph-dashboard/webhook[1]: notify retry canceled after 2 attempts: Post \"http://compute-1.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded"
Jan 23 10:41:17 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-alertmanager-compute-0[104185]: ts=2026-01-23T10:41:17.921Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[2]: notify retry canceled after 2 attempts: Post \"http://compute-2.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded; ceph-dashboard/webhook[1]: notify retry canceled after 2 attempts: Post \"http://compute-1.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded"
Jan 23 10:41:19 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-alertmanager-compute-0[104185]: ts=2026-01-23T10:41:19.000Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[2] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"http://compute-2.ctlplane.example.com:8443/api/prometheus_receiver\": dial tcp 192.168.122.102:8443: i/o timeout"
Jan 23 10:41:19 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-alertmanager-compute-0[104185]: ts=2026-01-23T10:41:19.003Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[1]: notify retry canceled after 2 attempts: Post \"http://compute-1.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded; ceph-dashboard/webhook[2]: notify retry canceled after 3 attempts: Post \"http://compute-2.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded"
Jan 23 10:41:23 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-alertmanager-compute-0[104185]: ts=2026-01-23T10:41:23.810Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"http://compute-1.ctlplane.example.com:8443/api/prometheus_receiver\": dial tcp 192.168.122.101:8443: i/o timeout"
Jan 23 10:41:23 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-alertmanager-compute-0[104185]: ts=2026-01-23T10:41:23.811Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[2] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"http://compute-2.ctlplane.example.com:8443/api/prometheus_receiver\": dial tcp 192.168.122.102:8443: i/o timeout"
Jan 23 10:41:23 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-alertmanager-compute-0[104185]: ts=2026-01-23T10:41:23.811Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[1]: notify retry canceled after 3 attempts: Post \"http://compute-1.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded; ceph-dashboard/webhook[2]: notify retry canceled after 3 attempts: Post \"http://compute-2.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded"

● ceph-f3005f84-239a-55b6-a948-8f1fb592b920@crash.compute-0.service - Ceph crash.compute-0 for f3005f84-239a-55b6-a948-8f1fb592b920
     Loaded: loaded (/etc/systemd/system/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@.service; enabled; preset: disabled)
     Active: active (running) since Fri 2026-01-23 09:48:36 UTC; 52min ago
   Main PID: 79594 (conmon)
         IO: 0B read, 178.5K written
      Tasks: 3 (limit: 48560)
     Memory: 7.7M (peak: 24.6M)
        CPU: 475ms
     CGroup: /system.slice/system-ceph\x2df3005f84\x2d239a\x2d55b6\x2da948\x2d8f1fb592b920.slice/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@crash.compute-0.service
             ├─libpod-payload-ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21
             │ ├─79596 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ └─79598 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             └─runtime
               └─79594 /usr/bin/conmon --api-version 1 -c ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21 -u ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21/userdata -p /run/containers/storage/overlay-containers/ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21

Jan 23 09:48:36 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-crash-compute-0[79594]: 2026-01-23T09:48:36.940+0000 7f21baf35640 -1 AuthRegistry(0x7f21baf33ff0) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx
Jan 23 09:48:36 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-crash-compute-0[79594]: 2026-01-23T09:48:36.941+0000 7f21b8caa640 -1 monclient(hunting): handle_auth_bad_method server allowed_methods [2] but i only support [1]
Jan 23 09:48:36 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-crash-compute-0[79594]: 2026-01-23T09:48:36.941+0000 7f21baf35640 -1 monclient: authenticate NOTE: no keyring found; disabled cephx authentication
Jan 23 09:48:36 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-crash-compute-0[79594]: [errno 13] RADOS permission denied (error connecting to the cluster)
Jan 23 09:48:36 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-crash-compute-0[79594]: INFO:ceph-crash:monitoring path /var/lib/ceph/crash, delay 600s
Jan 23 09:58:37 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-crash-compute-0[79594]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Jan 23 10:08:37 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-crash-compute-0[79594]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Jan 23 10:18:37 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-crash-compute-0[79594]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Jan 23 10:28:37 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-crash-compute-0[79594]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Jan 23 10:38:37 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-crash-compute-0[79594]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'

● ceph-f3005f84-239a-55b6-a948-8f1fb592b920@grafana.compute-0.service - Ceph grafana.compute-0 for f3005f84-239a-55b6-a948-8f1fb592b920
     Loaded: loaded (/etc/systemd/system/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@.service; enabled; preset: disabled)
     Active: active (running) since Fri 2026-01-23 09:54:48 UTC; 46min ago
   Main PID: 104501 (conmon)
         IO: 34.1M read, 294.0K written
      Tasks: 18 (limit: 48560)
     Memory: 124.0M (peak: 160.6M)
        CPU: 34.028s
     CGroup: /system.slice/system-ceph\x2df3005f84\x2d239a\x2d55b6\x2da948\x2d8f1fb592b920.slice/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@grafana.compute-0.service
             ├─libpod-payload-91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28
             │ ├─104505 /run/podman-init -- /run.sh
             │ └─104507 grafana server --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini --packaging=docker cfg:default.log.mode=console cfg:default.paths.data=/var/lib/grafana cfg:default.paths.logs=/var/log/grafana cfg:default.paths.plugins=/var/lib/grafana/plugins cfg:default.paths.provisioning=/etc/grafana/provisioning
             └─runtime
               └─104501 /usr/bin/conmon --api-version 1 -c 91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28 -u 91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28/userdata -p /run/containers/storage/overlay-containers/91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-grafana-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@grafana.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28

Jan 23 10:14:48 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-grafana-compute-0[104501]: logger=plugins.update.checker t=2026-01-23T10:14:48.748343586Z level=info msg="Update check succeeded" duration=54.340288ms
Jan 23 10:14:48 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-grafana-compute-0[104501]: logger=grafana.update.checker t=2026-01-23T10:14:48.749095368Z level=info msg="Update check succeeded" duration=51.284462ms
Jan 23 10:14:48 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-grafana-compute-0[104501]: logger=cleanup t=2026-01-23T10:14:48.808468087Z level=info msg="Completed cleanup jobs" duration=184.428216ms
Jan 23 10:24:48 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-grafana-compute-0[104501]: logger=grafana.update.checker t=2026-01-23T10:24:48.749425388Z level=info msg="Update check succeeded" duration=52.366632ms
Jan 23 10:24:48 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-grafana-compute-0[104501]: logger=plugins.update.checker t=2026-01-23T10:24:48.752176418Z level=info msg="Update check succeeded" duration=57.899803ms
Jan 23 10:24:48 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-grafana-compute-0[104501]: logger=cleanup t=2026-01-23T10:24:48.752213829Z level=info msg="Completed cleanup jobs" duration=128.082489ms
Jan 23 10:26:11 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-grafana-compute-0[104501]: logger=infra.usagestats t=2026-01-23T10:26:11.673829496Z level=info msg="Usage stats are ready to report"
Jan 23 10:34:48 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-grafana-compute-0[104501]: logger=cleanup t=2026-01-23T10:34:48.641654034Z level=info msg="Completed cleanup jobs" duration=17.60909ms
Jan 23 10:34:48 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-grafana-compute-0[104501]: logger=plugins.update.checker t=2026-01-23T10:34:48.744842794Z level=info msg="Update check succeeded" duration=50.823943ms
Jan 23 10:34:48 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-grafana-compute-0[104501]: logger=grafana.update.checker t=2026-01-23T10:34:48.745329007Z level=info msg="Update check succeeded" duration=48.286511ms

● ceph-f3005f84-239a-55b6-a948-8f1fb592b920@haproxy.nfs.cephfs.compute-0.yeogal.service - Ceph haproxy.nfs.cephfs.compute-0.yeogal for f3005f84-239a-55b6-a948-8f1fb592b920
     Loaded: loaded (/etc/systemd/system/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@.service; enabled; preset: disabled)
     Active: active (running) since Fri 2026-01-23 09:53:08 UTC; 48min ago
   Main PID: 95465 (conmon)
         IO: 4.8M read, 1.3M written
      Tasks: 11 (limit: 48560)
     Memory: 10.9M (peak: 17.5M)
        CPU: 4.805s
     CGroup: /system.slice/system-ceph\x2df3005f84\x2d239a\x2d55b6\x2da948\x2d8f1fb592b920.slice/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@haproxy.nfs.cephfs.compute-0.yeogal.service
             ├─libpod-payload-2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178
             │ ├─95467 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ ├─95469 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ └─95471 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             └─runtime
               └─95465 /usr/bin/conmon --api-version 1 -c 2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178 -u 2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178/userdata -p /run/containers/storage/overlay-containers/2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-haproxy-nfs-cephfs-compute-0-yeogal --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@haproxy.nfs.cephfs.compute-0.yeogal.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178

Jan 23 10:21:59 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-haproxy-nfs-cephfs-compute-0-yeogal[95465]: [WARNING] 022/102159 (4) : Server backend/nfs.cephfs.1 is DOWN, reason: Layer4 connection problem, info: "Connection refused", check duration: 0ms. 2 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue.
Jan 23 10:22:19 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-haproxy-nfs-cephfs-compute-0-yeogal[95465]: [WARNING] 022/102219 (4) : Server backend/nfs.cephfs.1 is UP, reason: Layer4 check passed, check duration: 0ms. 3 active and 0 backup servers online. 0 sessions requeued, 0 total in queue.
Jan 23 10:22:51 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-haproxy-nfs-cephfs-compute-0-yeogal[95465]: [WARNING] 022/102251 (4) : Server backend/nfs.cephfs.1 is DOWN, reason: Layer4 connection problem, info: "Connection refused", check duration: 0ms. 2 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue.
Jan 23 10:23:11 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-haproxy-nfs-cephfs-compute-0-yeogal[95465]: [WARNING] 022/102311 (4) : Server backend/nfs.cephfs.1 is UP, reason: Layer4 check passed, check duration: 0ms. 3 active and 0 backup servers online. 0 sessions requeued, 0 total in queue.
Jan 23 10:23:32 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-haproxy-nfs-cephfs-compute-0-yeogal[95465]: [WARNING] 022/102332 (4) : Server backend/nfs.cephfs.0 is DOWN, reason: Layer4 connection problem, info: "Connection refused", check duration: 1ms. 2 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue.
Jan 23 10:23:54 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-haproxy-nfs-cephfs-compute-0-yeogal[95465]: [WARNING] 022/102354 (4) : Server backend/nfs.cephfs.0 is UP, reason: Layer4 check passed, check duration: 0ms. 3 active and 0 backup servers online. 0 sessions requeued, 0 total in queue.
Jan 23 10:24:17 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-haproxy-nfs-cephfs-compute-0-yeogal[95465]: [WARNING] 022/102417 (4) : Server backend/nfs.cephfs.1 is DOWN, reason: Layer4 connection problem, info: "Connection refused", check duration: 0ms. 2 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue.
Jan 23 10:24:28 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-haproxy-nfs-cephfs-compute-0-yeogal[95465]: [WARNING] 022/102428 (4) : Server backend/nfs.cephfs.0 is DOWN, reason: Layer4 connection problem, info: "Connection refused", check duration: 0ms. 1 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue.
Jan 23 10:25:47 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-haproxy-nfs-cephfs-compute-0-yeogal[95465]: [WARNING] 022/102547 (4) : Server backend/nfs.cephfs.2 is DOWN, reason: Layer4 connection problem, info: "Connection refused", check duration: 0ms. 0 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue.
Jan 23 10:25:47 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-haproxy-nfs-cephfs-compute-0-yeogal[95465]: [ALERT] 022/102547 (4) : backend 'backend' has no server available!

● ceph-f3005f84-239a-55b6-a948-8f1fb592b920@haproxy.rgw.default.compute-0.qabsws.service - Ceph haproxy.rgw.default.compute-0.qabsws for f3005f84-239a-55b6-a948-8f1fb592b920
     Loaded: loaded (/etc/systemd/system/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@.service; enabled; preset: disabled)
     Active: active (running) since Fri 2026-01-23 09:53:58 UTC; 47min ago
   Main PID: 97804 (conmon)
         IO: 0B read, 162.0K written
      Tasks: 11 (limit: 48560)
     Memory: 5.8M (peak: 19.8M)
        CPU: 5.083s
     CGroup: /system.slice/system-ceph\x2df3005f84\x2d239a\x2d55b6\x2da948\x2d8f1fb592b920.slice/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@haproxy.rgw.default.compute-0.qabsws.service
             ├─libpod-payload-872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61
             │ ├─97806 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ ├─97808 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ └─97810 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             └─runtime
               └─97804 /usr/bin/conmon --api-version 1 -c 872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61 -u 872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61/userdata -p /run/containers/storage/overlay-containers/872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-haproxy-rgw-default-compute-0-qabsws --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@haproxy.rgw.default.compute-0.qabsws.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61

Jan 23 09:53:57 compute-0 systemd[1]: Starting Ceph haproxy.rgw.default.compute-0.qabsws for f3005f84-239a-55b6-a948-8f1fb592b920...
Jan 23 09:53:57 compute-0 podman[97788]: 2026-01-23 09:53:57.940528837 +0000 UTC m=+0.041943151 container create 872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61 (image=quay.io/ceph/haproxy:2.3, name=ceph-f3005f84-239a-55b6-a948-8f1fb592b920-haproxy-rgw-default-compute-0-qabsws)
Jan 23 09:53:57 compute-0 podman[97788]: 2026-01-23 09:53:57.9978881 +0000 UTC m=+0.099302434 container init 872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61 (image=quay.io/ceph/haproxy:2.3, name=ceph-f3005f84-239a-55b6-a948-8f1fb592b920-haproxy-rgw-default-compute-0-qabsws)
Jan 23 09:53:58 compute-0 podman[97788]: 2026-01-23 09:53:58.003084342 +0000 UTC m=+0.104498646 container start 872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61 (image=quay.io/ceph/haproxy:2.3, name=ceph-f3005f84-239a-55b6-a948-8f1fb592b920-haproxy-rgw-default-compute-0-qabsws)
Jan 23 09:53:58 compute-0 bash[97788]: 872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61
Jan 23 09:53:58 compute-0 podman[97788]: 2026-01-23 09:53:57.922902454 +0000 UTC m=+0.024316788 image pull e85424b0d443f37ddd2dd8a3bb2ef6f18dd352b987723a921b64289023af2914 quay.io/ceph/haproxy:2.3
Jan 23 09:53:58 compute-0 systemd[1]: Started Ceph haproxy.rgw.default.compute-0.qabsws for f3005f84-239a-55b6-a948-8f1fb592b920.
Jan 23 09:53:58 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-haproxy-rgw-default-compute-0-qabsws[97804]: [NOTICE] 022/095358 (2) : New worker #1 (4) forked

● ceph-f3005f84-239a-55b6-a948-8f1fb592b920@keepalived.nfs.cephfs.compute-0.lrsdkc.service - Ceph keepalived.nfs.cephfs.compute-0.lrsdkc for f3005f84-239a-55b6-a948-8f1fb592b920
     Loaded: loaded (/etc/systemd/system/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@.service; enabled; preset: disabled)
     Active: active (running) since Fri 2026-01-23 09:53:25 UTC; 48min ago
   Main PID: 96148 (conmon)
         IO: 9.9M read, 179.0K written
      Tasks: 4 (limit: 48560)
     Memory: 12.8M (peak: 22.2M)
        CPU: 12.861s
     CGroup: /system.slice/system-ceph\x2df3005f84\x2d239a\x2d55b6\x2da948\x2d8f1fb592b920.slice/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@keepalived.nfs.cephfs.compute-0.lrsdkc.service
             ├─libpod-payload-4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609
             │ ├─96150 /run/podman-init -- ./init.sh
             │ ├─96152 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ └─96154 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             └─runtime
               └─96148 /usr/bin/conmon --api-version 1 -c 4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609 -u 4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609/userdata -p /run/containers/storage/overlay-containers/4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-nfs-cephfs-compute-0-lrsdkc --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@keepalived.nfs.cephfs.compute-0.lrsdkc.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609

Jan 23 09:53:25 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-nfs-cephfs-compute-0-lrsdkc[96148]: Fri Jan 23 09:53:25 2026: Configuration file /etc/keepalived/keepalived.conf
Jan 23 09:53:25 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-nfs-cephfs-compute-0-lrsdkc[96148]: Fri Jan 23 09:53:25 2026: NOTICE: setting config option max_auto_priority should result in better keepalived performance
Jan 23 09:53:25 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-nfs-cephfs-compute-0-lrsdkc[96148]: Fri Jan 23 09:53:25 2026: Starting VRRP child process, pid=4
Jan 23 09:53:25 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-nfs-cephfs-compute-0-lrsdkc[96148]: Fri Jan 23 09:53:25 2026: Startup complete
Jan 23 09:53:25 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-nfs-cephfs-compute-0-lrsdkc[96148]: Fri Jan 23 09:53:25 2026: (VI_0) Entering BACKUP STATE (init)
Jan 23 09:53:25 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-nfs-cephfs-compute-0-lrsdkc[96148]: Fri Jan 23 09:53:25 2026: VRRP_Script(check_backend) succeeded
Jan 23 09:53:29 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-nfs-cephfs-compute-0-lrsdkc[96148]: Fri Jan 23 09:53:29 2026: (VI_0) Entering MASTER STATE
Jan 23 09:53:35 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-nfs-cephfs-compute-0-lrsdkc[96148]: Fri Jan 23 09:53:35 2026: (VI_0) Received advert from 192.168.122.102 with lower priority 90, ours 100, forcing new election
Jan 23 09:54:01 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-nfs-cephfs-compute-0-lrsdkc[96148]: Fri Jan 23 09:54:01 2026: (VI_0) Entering BACKUP STATE
Jan 23 09:54:02 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-nfs-cephfs-compute-0-lrsdkc[96148]: Fri Jan 23 09:54:02 2026: (VI_0) Entering MASTER STATE

● ceph-f3005f84-239a-55b6-a948-8f1fb592b920@keepalived.rgw.default.compute-0.tytkrd.service - Ceph keepalived.rgw.default.compute-0.tytkrd for f3005f84-239a-55b6-a948-8f1fb592b920
     Loaded: loaded (/etc/systemd/system/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@.service; enabled; preset: disabled)
     Active: active (running) since Fri 2026-01-23 09:54:01 UTC; 47min ago
   Main PID: 98094 (conmon)
         IO: 4.2M read, 183.5K written
      Tasks: 4 (limit: 48560)
     Memory: 7.1M (peak: 23.3M)
        CPU: 12.081s
     CGroup: /system.slice/system-ceph\x2df3005f84\x2d239a\x2d55b6\x2da948\x2d8f1fb592b920.slice/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@keepalived.rgw.default.compute-0.tytkrd.service
             ├─libpod-payload-fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50
             │ ├─98096 /run/podman-init -- ./init.sh
             │ ├─98098 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ └─98100 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             └─runtime
               └─98094 /usr/bin/conmon --api-version 1 -c fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50 -u fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50/userdata -p /run/containers/storage/overlay-containers/fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-rgw-default-compute-0-tytkrd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@keepalived.rgw.default.compute-0.tytkrd.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50

Jan 23 09:54:01 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-rgw-default-compute-0-tytkrd[98094]: Fri Jan 23 09:54:01 2026: Running on Linux 5.14.0-661.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Fri Jan 16 09:19:22 UTC 2026 (built for Linux 5.14.0)
Jan 23 09:54:01 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-rgw-default-compute-0-tytkrd[98094]: Fri Jan 23 09:54:01 2026: Command line: '/usr/sbin/keepalived' '-n' '-l' '-f' '/etc/keepalived/keepalived.conf'
Jan 23 09:54:01 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-rgw-default-compute-0-tytkrd[98094]: Fri Jan 23 09:54:01 2026: Configuration file /etc/keepalived/keepalived.conf
Jan 23 09:54:01 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-rgw-default-compute-0-tytkrd[98094]: Fri Jan 23 09:54:01 2026: Failed to bind to process monitoring socket - errno 98 - Address already in use
Jan 23 09:54:01 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-rgw-default-compute-0-tytkrd[98094]: Fri Jan 23 09:54:01 2026: NOTICE: setting config option max_auto_priority should result in better keepalived performance
Jan 23 09:54:01 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-rgw-default-compute-0-tytkrd[98094]: Fri Jan 23 09:54:01 2026: Starting VRRP child process, pid=4
Jan 23 09:54:01 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-rgw-default-compute-0-tytkrd[98094]: Fri Jan 23 09:54:01 2026: Startup complete
Jan 23 09:54:01 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-rgw-default-compute-0-tytkrd[98094]: Fri Jan 23 09:54:01 2026: (VI_0) Entering BACKUP STATE (init)
Jan 23 09:54:01 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-rgw-default-compute-0-tytkrd[98094]: Fri Jan 23 09:54:01 2026: VRRP_Script(check_backend) succeeded
Jan 23 09:54:05 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-rgw-default-compute-0-tytkrd[98094]: Fri Jan 23 09:54:05 2026: (VI_0) Entering MASTER STATE

● ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mds.cephfs.compute-0.ymknms.service - Ceph mds.cephfs.compute-0.ymknms for f3005f84-239a-55b6-a948-8f1fb592b920
     Loaded: loaded (/etc/systemd/system/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@.service; enabled; preset: disabled)
     Active: active (running) since Fri 2026-01-23 09:52:33 UTC; 48min ago
   Main PID: 94624 (conmon)
         IO: 0B read, 219.0K written
      Tasks: 18 (limit: 48560)
     Memory: 27.2M (peak: 27.7M)
        CPU: 2.087s
     CGroup: /system.slice/system-ceph\x2df3005f84\x2d239a\x2d55b6\x2da948\x2d8f1fb592b920.slice/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mds.cephfs.compute-0.ymknms.service
             ├─libpod-payload-e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093
             │ ├─94626 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.ymknms -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─94628 /usr/bin/ceph-mds -n mds.cephfs.compute-0.ymknms -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─94624 /usr/bin/conmon --api-version 1 -c e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093 -u e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093/userdata -p /run/containers/storage/overlay-containers/e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-mds-cephfs-compute-0-ymknms --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mds.cephfs.compute-0.ymknms.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093

Jan 23 10:40:53 compute-0 ceph-mds[94628]: mds.cephfs.compute-0.ymknms Can't run that command on an inactive MDS!
Jan 23 10:40:54 compute-0 ceph-mds[94628]: mds.cephfs.compute-0.ymknms asok_command: dump_ops_in_flight {prefix=dump_ops_in_flight} (starting...)
Jan 23 10:40:54 compute-0 ceph-mds[94628]: mds.cephfs.compute-0.ymknms Can't run that command on an inactive MDS!
Jan 23 10:40:54 compute-0 ceph-mds[94628]: mds.cephfs.compute-0.ymknms asok_command: get subtrees {prefix=get subtrees} (starting...)
Jan 23 10:40:54 compute-0 ceph-mds[94628]: mds.cephfs.compute-0.ymknms Can't run that command on an inactive MDS!
Jan 23 10:40:54 compute-0 ceph-mds[94628]: mds.cephfs.compute-0.ymknms asok_command: ops {prefix=ops} (starting...)
Jan 23 10:40:54 compute-0 ceph-mds[94628]: mds.cephfs.compute-0.ymknms Can't run that command on an inactive MDS!
Jan 23 10:40:55 compute-0 ceph-mds[94628]: mds.cephfs.compute-0.ymknms asok_command: session ls {prefix=session ls} (starting...)
Jan 23 10:40:55 compute-0 ceph-mds[94628]: mds.cephfs.compute-0.ymknms Can't run that command on an inactive MDS!
Jan 23 10:40:55 compute-0 ceph-mds[94628]: mds.cephfs.compute-0.ymknms asok_command: status {prefix=status} (starting...)

● ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mgr.compute-0.nbdygh.service - Ceph mgr.compute-0.nbdygh for f3005f84-239a-55b6-a948-8f1fb592b920
     Loaded: loaded (/etc/systemd/system/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@.service; enabled; preset: disabled)
     Active: active (running) since Fri 2026-01-23 09:47:45 UTC; 53min ago
   Main PID: 74629 (conmon)
         IO: 0B read, 3.2M written
      Tasks: 176 (limit: 48560)
     Memory: 565.4M (peak: 566.6M)
        CPU: 2min 6.111s
     CGroup: /system.slice/system-ceph\x2df3005f84\x2d239a\x2d55b6\x2da948\x2d8f1fb592b920.slice/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mgr.compute-0.nbdygh.service
             ├─libpod-payload-e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3
             │ ├─74631 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.nbdygh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─74633 /usr/bin/ceph-mgr -n mgr.compute-0.nbdygh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─74629 /usr/bin/conmon --api-version 1 -c e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3 -u e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3/userdata -p /run/containers/storage/overlay-containers/e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-mgr-compute-0-nbdygh --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mgr.compute-0.nbdygh.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3

Jan 23 10:41:23 compute-0 ceph-mgr[74633]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Jan 23 10:41:23 compute-0 ceph-mgr[74633]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Jan 23 10:41:23 compute-0 ceph-mgr[74633]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Jan 23 10:41:24 compute-0 ceph-mgr[74633]: log_channel(audit) log [DBG] : from='client.27722 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""]}]: dispatch
Jan 23 10:41:24 compute-0 ceph-mgr[74633]: log_channel(audit) log [DBG] : from='client.27728 -' entity='client.admin' cmd=[{"prefix": "pg stat", "target": ["mon-mgr", ""]}]: dispatch
Jan 23 10:41:24 compute-0 ceph-mgr[74633]: log_channel(audit) log [DBG] : from='client.18573 -' entity='client.admin' cmd=[{"prefix": "osd blocked-by", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Jan 23 10:41:25 compute-0 ceph-mgr[74633]: log_channel(audit) log [DBG] : from='client.18585 -' entity='client.admin' cmd=[{"prefix": "osd df", "output_method": "tree", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Jan 23 10:41:25 compute-0 ceph-mgr[74633]: log_channel(cluster) log [DBG] : pgmap v1454: 353 pgs: 353 active+clean; 41 MiB data, 255 MiB used, 60 GiB / 60 GiB avail; 511 B/s rd, 0 op/s
Jan 23 10:41:26 compute-0 ceph-mgr[74633]: log_channel(audit) log [DBG] : from='client.28078 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""]}]: dispatch
Jan 23 10:41:26 compute-0 ceph-mgr[74633]: log_channel(audit) log [DBG] : from='client.18594 -' entity='client.admin' cmd=[{"prefix": "osd df", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch

● ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mon.compute-0.service - Ceph mon.compute-0 for f3005f84-239a-55b6-a948-8f1fb592b920
     Loaded: loaded (/etc/systemd/system/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@.service; enabled; preset: disabled)
     Active: active (running) since Fri 2026-01-23 09:47:41 UTC; 53min ago
   Main PID: 74331 (conmon)
         IO: 3.1M read, 538.2M written
      Tasks: 27 (limit: 48560)
     Memory: 110.5M (peak: 126.9M)
        CPU: 1min 3.617s
     CGroup: /system.slice/system-ceph\x2df3005f84\x2d239a\x2d55b6\x2da948\x2d8f1fb592b920.slice/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mon.compute-0.service
             ├─libpod-payload-cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6
             │ ├─74333 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─74335 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             └─runtime
               └─74331 /usr/bin/conmon --api-version 1 -c cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6 -u cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6/userdata -p /run/containers/storage/overlay-containers/cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6

Jan 23 10:41:24 compute-0 ceph-mon[74335]: from='client.? 192.168.122.101:0/2473056180' entity='client.admin' cmd=[{"prefix": "osd stat"}]: dispatch
Jan 23 10:41:24 compute-0 ceph-mon[74335]: from='client.18549 -' entity='client.admin' cmd=[{"prefix": "fs status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Jan 23 10:41:24 compute-0 ceph-mon[74335]: pgmap v1453: 353 pgs: 353 active+clean; 41 MiB data, 255 MiB used, 60 GiB / 60 GiB avail; 608 B/s rd, 0 op/s
Jan 23 10:41:24 compute-0 ceph-mon[74335]: from='client.28048 -' entity='client.admin' cmd=[{"prefix": "osd pool autoscale-status", "target": ["mon-mgr", ""]}]: dispatch
Jan 23 10:41:24 compute-0 ceph-mon[74335]: from='client.? 192.168.122.100:0/1229505767' entity='client.admin' cmd=[{"prefix": "mds stat", "format": "json-pretty"}]: dispatch
Jan 23 10:41:24 compute-0 ceph-mon[74335]: from='client.27722 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""]}]: dispatch
Jan 23 10:41:24 compute-0 ceph-mon[74335]: from='client.? 192.168.122.100:0/1649330852' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json-pretty"}]: dispatch
Jan 23 10:41:24 compute-0 ceph-mon[74335]: from='client.? 192.168.122.102:0/1616323667' entity='client.admin' cmd=[{"prefix": "osd pool ls", "detail": "detail"}]: dispatch
Jan 23 10:41:25 compute-0 ceph-mon[74335]: mon.compute-0@0(leader) e3 handle_command mon_command({"prefix": "osd blocklist ls", "format": "json-pretty"} v 0)
Jan 23 10:41:25 compute-0 ceph-mon[74335]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/467195774' entity='client.admin' cmd=[{"prefix": "osd blocklist ls", "format": "json-pretty"}]: dispatch

× ceph-f3005f84-239a-55b6-a948-8f1fb592b920@nfs.cephfs.2.0.compute-0.fenqiu.service - Ceph nfs.cephfs.2.0.compute-0.fenqiu for f3005f84-239a-55b6-a948-8f1fb592b920
     Loaded: loaded (/etc/systemd/system/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@.service; enabled; preset: disabled)
     Active: failed (Result: exit-code) since Fri 2026-01-23 10:25:53 UTC; 15min ago
   Duration: 4min 36.844s
    Process: 262715 ExecStartPre=/bin/rm -f /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@nfs.cephfs.2.0.compute-0.fenqiu.service-pid /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@nfs.cephfs.2.0.compute-0.fenqiu.service-cid (code=exited, status=0/SUCCESS)
    Process: 262716 ExecStart=/bin/bash /var/lib/ceph/f3005f84-239a-55b6-a948-8f1fb592b920/nfs.cephfs.2.0.compute-0.fenqiu/unit.run (code=exited, status=0/SUCCESS)
    Process: 267509 ExecStopPost=/bin/bash /var/lib/ceph/f3005f84-239a-55b6-a948-8f1fb592b920/nfs.cephfs.2.0.compute-0.fenqiu/unit.poststop (code=exited, status=0/SUCCESS)
    Process: 267536 ExecStopPost=/bin/rm -f /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@nfs.cephfs.2.0.compute-0.fenqiu.service-pid /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@nfs.cephfs.2.0.compute-0.fenqiu.service-cid (code=exited, status=0/SUCCESS)
   Main PID: 262782 (code=exited, status=139)
        CPU: 1.979s

Jan 23 10:25:53 compute-0 systemd[1]: ceph-f3005f84-239a-55b6-a948-8f1fb592b920@nfs.cephfs.2.0.compute-0.fenqiu.service: Scheduled restart job, restart counter is at 12.
Jan 23 10:25:53 compute-0 systemd[1]: Stopped Ceph nfs.cephfs.2.0.compute-0.fenqiu for f3005f84-239a-55b6-a948-8f1fb592b920.
Jan 23 10:25:53 compute-0 systemd[1]: ceph-f3005f84-239a-55b6-a948-8f1fb592b920@nfs.cephfs.2.0.compute-0.fenqiu.service: Consumed 1.979s CPU time.
Jan 23 10:25:53 compute-0 systemd[1]: ceph-f3005f84-239a-55b6-a948-8f1fb592b920@nfs.cephfs.2.0.compute-0.fenqiu.service: Start request repeated too quickly.
Jan 23 10:25:53 compute-0 systemd[1]: ceph-f3005f84-239a-55b6-a948-8f1fb592b920@nfs.cephfs.2.0.compute-0.fenqiu.service: Failed with result 'exit-code'.
Jan 23 10:25:53 compute-0 systemd[1]: Failed to start Ceph nfs.cephfs.2.0.compute-0.fenqiu for f3005f84-239a-55b6-a948-8f1fb592b920.

● ceph-f3005f84-239a-55b6-a948-8f1fb592b920@node-exporter.compute-0.service - Ceph node-exporter.compute-0 for f3005f84-239a-55b6-a948-8f1fb592b920
     Loaded: loaded (/etc/systemd/system/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@.service; enabled; preset: disabled)
     Active: active (running) since Fri 2026-01-23 09:51:16 UTC; 50min ago
   Main PID: 89471 (conmon)
         IO: 0B read, 24.8M written
      Tasks: 7 (limit: 48560)
     Memory: 34.8M (peak: 84.6M)
        CPU: 11.167s
     CGroup: /system.slice/system-ceph\x2df3005f84\x2d239a\x2d55b6\x2da948\x2d8f1fb592b920.slice/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@node-exporter.compute-0.service
             ├─libpod-payload-97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8
             │ ├─89474 /run/podman-init -- /bin/node_exporter --no-collector.timex --web.listen-address=:9100 --path.procfs=/host/proc --path.sysfs=/host/sys --path.rootfs=/rootfs
             │ └─89477 /bin/node_exporter --no-collector.timex --web.listen-address=:9100 --path.procfs=/host/proc --path.sysfs=/host/sys --path.rootfs=/rootfs
             └─runtime
               └─89471 /usr/bin/conmon --api-version 1 -c 97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8 -u 97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8/userdata -p /run/containers/storage/overlay-containers/97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-node-exporter-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@node-exporter.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8

Jan 23 09:51:16 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-node-exporter-compute-0[89471]: ts=2026-01-23T09:51:16.568Z caller=node_exporter.go:117 level=info collector=thermal_zone
Jan 23 09:51:16 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-node-exporter-compute-0[89471]: ts=2026-01-23T09:51:16.568Z caller=node_exporter.go:117 level=info collector=time
Jan 23 09:51:16 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-node-exporter-compute-0[89471]: ts=2026-01-23T09:51:16.568Z caller=node_exporter.go:117 level=info collector=udp_queues
Jan 23 09:51:16 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-node-exporter-compute-0[89471]: ts=2026-01-23T09:51:16.568Z caller=node_exporter.go:117 level=info collector=uname
Jan 23 09:51:16 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-node-exporter-compute-0[89471]: ts=2026-01-23T09:51:16.568Z caller=node_exporter.go:117 level=info collector=vmstat
Jan 23 09:51:16 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-node-exporter-compute-0[89471]: ts=2026-01-23T09:51:16.568Z caller=node_exporter.go:117 level=info collector=xfs
Jan 23 09:51:16 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-node-exporter-compute-0[89471]: ts=2026-01-23T09:51:16.568Z caller=node_exporter.go:117 level=info collector=zfs
Jan 23 09:51:16 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-node-exporter-compute-0[89471]: ts=2026-01-23T09:51:16.569Z caller=tls_config.go:274 level=info msg="Listening on" address=[::]:9100
Jan 23 09:51:16 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-node-exporter-compute-0[89471]: ts=2026-01-23T09:51:16.569Z caller=tls_config.go:277 level=info msg="TLS is disabled." http2=false address=[::]:9100
Jan 23 09:51:16 compute-0 systemd[1]: Started Ceph node-exporter.compute-0 for f3005f84-239a-55b6-a948-8f1fb592b920.

● ceph-f3005f84-239a-55b6-a948-8f1fb592b920@osd.1.service - Ceph osd.1 for f3005f84-239a-55b6-a948-8f1fb592b920
     Loaded: loaded (/etc/systemd/system/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@.service; enabled; preset: disabled)
     Active: active (running) since Fri 2026-01-23 09:49:39 UTC; 51min ago
   Main PID: 82637 (conmon)
         IO: 138.0M read, 1.7G written
      Tasks: 60 (limit: 48560)
     Memory: 540.4M (peak: 582.1M)
        CPU: 35.510s
     CGroup: /system.slice/system-ceph\x2df3005f84\x2d239a\x2d55b6\x2da948\x2d8f1fb592b920.slice/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@osd.1.service
             ├─libpod-payload-ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b
             │ ├─82639 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─82641 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─82637 /usr/bin/conmon --api-version 1 -c ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b -u ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b/userdata -p /run/containers/storage/overlay-containers/ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b

Jan 23 10:40:59 compute-0 ceph-osd[82641]: monclient: tick
Jan 23 10:40:59 compute-0 ceph-osd[82641]: monclient: _check_auth_tickets
Jan 23 10:40:59 compute-0 ceph-osd[82641]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-01-23T10:40:27.347629+0000)
Jan 23 10:40:59 compute-0 ceph-osd[82641]: prioritycache tune_memory target: 4294967296 mapped: 117145600 unmapped: 26787840 heap: 143933440 old mem: 2845415832 new mem: 2845415832
Jan 23 10:40:59 compute-0 ceph-osd[82641]: osd.1 146 heartbeat osd_stat(store_statfs(0x4fac33000/0x0/0x4ffc00000, data 0x56c43f/0x629000, compress 0x0/0x0/0x0, omap 0x63b, meta 0x499f9c5), peers [0,2] op hist [])
Jan 23 10:40:59 compute-0 ceph-osd[82641]: monclient: tick
Jan 23 10:40:59 compute-0 ceph-osd[82641]: monclient: _check_auth_tickets
Jan 23 10:40:59 compute-0 ceph-osd[82641]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-01-23T10:40:28.347754+0000)
Jan 23 10:40:59 compute-0 ceph-osd[82641]: prioritycache tune_memory target: 4294967296 mapped: 117489664 unmapped: 26443776 heap: 143933440 old mem: 2845415832 new mem: 2845415832
Jan 23 10:40:59 compute-0 ceph-osd[82641]: do_command 'log dump' '{prefix=log dump}'

● ceph-f3005f84-239a-55b6-a948-8f1fb592b920@prometheus.compute-0.service - Ceph prometheus.compute-0 for f3005f84-239a-55b6-a948-8f1fb592b920
     Loaded: loaded (/etc/systemd/system/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@.service; enabled; preset: disabled)
     Active: active (running) since Fri 2026-01-23 09:54:11 UTC; 47min ago
   Main PID: 98641 (conmon)
         IO: 15.2M read, 15.3M written
      Tasks: 16 (limit: 48560)
     Memory: 80.8M (peak: 95.2M)
        CPU: 14.477s
     CGroup: /system.slice/system-ceph\x2df3005f84\x2d239a\x2d55b6\x2da948\x2d8f1fb592b920.slice/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@prometheus.compute-0.service
             ├─libpod-payload-8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf
             │ ├─98643 /run/podman-init -- /bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=15d --storage.tsdb.retention.size=0 --web.external-url=http://compute-0.ctlplane.example.com:9095 --web.listen-address=192.168.122.100:9095
             │ └─98645 /bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=15d --storage.tsdb.retention.size=0 --web.external-url=http://compute-0.ctlplane.example.com:9095 --web.listen-address=192.168.122.100:9095
             └─runtime
               └─98641 /usr/bin/conmon --api-version 1 -c 8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf -u 8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf/userdata -p /run/containers/storage/overlay-containers/8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-prometheus-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@prometheus.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf

Jan 23 09:54:11 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-prometheus-compute-0[98641]: ts=2026-01-23T09:54:11.100Z caller=head.go:698 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=2.08µs
Jan 23 09:54:11 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-prometheus-compute-0[98641]: ts=2026-01-23T09:54:11.100Z caller=head.go:706 level=info component=tsdb msg="Replaying WAL, this may take a while"
Jan 23 09:54:11 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-prometheus-compute-0[98641]: ts=2026-01-23T09:54:11.101Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=0
Jan 23 09:54:11 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-prometheus-compute-0[98641]: ts=2026-01-23T09:54:11.101Z caller=head.go:815 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=32.131µs wal_replay_duration=505.014µs wbl_replay_duration=160ns total_replay_duration=561.576µs
Jan 23 09:54:11 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-prometheus-compute-0[98641]: ts=2026-01-23T09:54:11.102Z caller=main.go:1150 level=info fs_type=XFS_SUPER_MAGIC
Jan 23 09:54:11 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-prometheus-compute-0[98641]: ts=2026-01-23T09:54:11.102Z caller=main.go:1153 level=info msg="TSDB started"
Jan 23 09:54:11 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-prometheus-compute-0[98641]: ts=2026-01-23T09:54:11.102Z caller=main.go:1335 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml
Jan 23 09:54:11 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-prometheus-compute-0[98641]: ts=2026-01-23T09:54:11.127Z caller=main.go:1372 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=24.654921ms db_storage=1.29µs remote_storage=2.35µs web_handler=540ns query_engine=13.501µs scrape=3.145806ms scrape_sd=138.834µs notify=18.321µs notify_sd=11.67µs rules=20.728453ms tracing=14.71µs
Jan 23 09:54:11 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-prometheus-compute-0[98641]: ts=2026-01-23T09:54:11.127Z caller=main.go:1114 level=info msg="Server is ready to receive web requests."
Jan 23 09:54:11 compute-0 ceph-f3005f84-239a-55b6-a948-8f1fb592b920-prometheus-compute-0[98641]: ts=2026-01-23T09:54:11.127Z caller=manager.go:163 level=info component="rule manager" msg="Starting rule manager..."

● ceph-f3005f84-239a-55b6-a948-8f1fb592b920@rgw.rgw.compute-0.jbpfwf.service - Ceph rgw.rgw.compute-0.jbpfwf for f3005f84-239a-55b6-a948-8f1fb592b920
     Loaded: loaded (/etc/systemd/system/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@.service; enabled; preset: disabled)
     Active: active (running) since Fri 2026-01-23 09:52:22 UTC; 49min ago
   Main PID: 93744 (conmon)
         IO: 832.0K read, 3.3M written
      Tasks: 613 (limit: 48560)
     Memory: 122.5M (peak: 123.4M)
        CPU: 25.666s
     CGroup: /system.slice/system-ceph\x2df3005f84\x2d239a\x2d55b6\x2da948\x2d8f1fb592b920.slice/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@rgw.rgw.compute-0.jbpfwf.service
             ├─libpod-payload-318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a
             │ ├─93746 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.jbpfwf -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─93748 /usr/bin/radosgw -n client.rgw.rgw.compute-0.jbpfwf -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─93744 /usr/bin/conmon --api-version 1 -c 318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a -u 318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a/userdata -p /run/containers/storage/overlay-containers/318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-rgw-rgw-compute-0-jbpfwf --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@rgw.rgw.compute-0.jbpfwf.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a

Jan 23 10:41:22 compute-0 radosgw[93748]: beast: 0x7fa5c588c5d0: 192.168.122.102 - anonymous [23/Jan/2026:10:41:22.455 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.000000000s
Jan 23 10:41:23 compute-0 radosgw[93748]: ====== starting new request req=0x7fa5c588c5d0 =====
Jan 23 10:41:23 compute-0 radosgw[93748]: ====== req done req=0x7fa5c588c5d0 op status=0 http_status=200 latency=0.002000058s ======
Jan 23 10:41:23 compute-0 radosgw[93748]: beast: 0x7fa5c588c5d0: 192.168.122.100 - anonymous [23/Jan/2026:10:41:23.373 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.002000058s
Jan 23 10:41:24 compute-0 radosgw[93748]: ====== starting new request req=0x7fa5c588c5d0 =====
Jan 23 10:41:24 compute-0 radosgw[93748]: ====== req done req=0x7fa5c588c5d0 op status=0 http_status=200 latency=0.000000000s ======
Jan 23 10:41:24 compute-0 radosgw[93748]: beast: 0x7fa5c588c5d0: 192.168.122.102 - anonymous [23/Jan/2026:10:41:24.457 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.000000000s
Jan 23 10:41:25 compute-0 radosgw[93748]: ====== starting new request req=0x7fa5c588c5d0 =====
Jan 23 10:41:25 compute-0 radosgw[93748]: ====== req done req=0x7fa5c588c5d0 op status=0 http_status=200 latency=0.000000000s ======
Jan 23 10:41:25 compute-0 radosgw[93748]: beast: 0x7fa5c588c5d0: 192.168.122.100 - anonymous [23/Jan/2026:10:41:25.376 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.000000000s

● ceph-osd-losetup-0.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-0.service; enabled; preset: disabled)
     Active: active (exited) since Fri 2026-01-23 09:46:33 UTC; 54min ago
   Main PID: 72399 (code=exited, status=0/SUCCESS)
        CPU: 21ms

Jan 23 09:46:33 compute-0 systemd[1]: Starting Ceph OSD losetup...
Jan 23 09:46:33 compute-0 bash[72400]: /dev/loop3: [64513]:4328449 (/var/lib/ceph-osd-0.img)
Jan 23 09:46:33 compute-0 systemd[1]: Finished Ceph OSD losetup.

● chronyd.service - NTP client/server
     Loaded: loaded (/usr/lib/systemd/system/chronyd.service; enabled; preset: enabled)
     Active: active (running) since Fri 2026-01-23 09:44:21 UTC; 57min ago
       Docs: man:chronyd(8)
             man:chrony.conf(5)
   Main PID: 58433 (chronyd)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 1020.0K (peak: 1.9M)
        CPU: 77ms
     CGroup: /system.slice/chronyd.service
             └─58433 /usr/sbin/chronyd -F 2

Jan 23 09:44:21 compute-0 systemd[1]: Starting NTP client/server...
Jan 23 09:44:21 compute-0 chronyd[58433]: chronyd version 4.8 starting (+CMDMON +REFCLOCK +RTC +PRIVDROP +SCFILTER +SIGND +NTS +SECHASH +IPV6 +DEBUG)
Jan 23 09:44:21 compute-0 chronyd[58433]: Frequency -28.544 +/- 0.222 ppm read from /var/lib/chrony/drift
Jan 23 09:44:21 compute-0 chronyd[58433]: Loaded seccomp filter (level 2)
Jan 23 09:44:21 compute-0 systemd[1]: Started NTP client/server.
Jan 23 09:46:31 compute-0 chronyd[58433]: Selected source 167.160.187.179 (pool.ntp.org)

● cloud-config.service - Cloud-init: Config Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.service; enabled; preset: disabled)
     Active: active (exited) since Fri 2026-01-23 09:01:30 UTC; 1h 39min ago
   Main PID: 1000 (code=exited, status=0/SUCCESS)
        CPU: 412ms

Jan 23 09:01:30 np0005593293.novalocal systemd[1]: Starting Cloud-init: Config Stage...
Jan 23 09:01:30 np0005593293.novalocal cloud-init[1147]: Cloud-init v. 24.4-8.el9 running 'modules:config' at Fri, 23 Jan 2026 09:01:30 +0000. Up 12.16 seconds.
Jan 23 09:01:30 np0005593293.novalocal systemd[1]: Finished Cloud-init: Config Stage.

● cloud-final.service - Cloud-init: Final Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-final.service; enabled; preset: disabled)
     Active: active (exited) since Fri 2026-01-23 09:01:31 UTC; 1h 39min ago
   Main PID: 1210 (code=exited, status=0/SUCCESS)
        CPU: 512ms

Jan 23 09:01:31 np0005593293.novalocal cloud-init[1310]: Cloud-init v. 24.4-8.el9 running 'modules:final' at Fri, 23 Jan 2026 09:01:30 +0000. Up 12.56 seconds.
Jan 23 09:01:31 np0005593293.novalocal cloud-init[1343]: #############################################################
Jan 23 09:01:31 np0005593293.novalocal cloud-init[1348]: -----BEGIN SSH HOST KEY FINGERPRINTS-----
Jan 23 09:01:31 np0005593293.novalocal cloud-init[1356]: 256 SHA256:miLeFBmA+MsGxU8XSCXX2tzFYSlAjlwKVaJr1qwMkok root@np0005593293.novalocal (ECDSA)
Jan 23 09:01:31 np0005593293.novalocal cloud-init[1359]: 256 SHA256:b0o3optgTJFv6mmeQoOvX31kKYaaUgPCCmDUcQEZQds root@np0005593293.novalocal (ED25519)
Jan 23 09:01:31 np0005593293.novalocal cloud-init[1361]: 3072 SHA256:Zv5Nklgu5Px7Z2L6MoVKKy0OyX+BRyE7O8mBGzaGMew root@np0005593293.novalocal (RSA)
Jan 23 09:01:31 np0005593293.novalocal cloud-init[1362]: -----END SSH HOST KEY FINGERPRINTS-----
Jan 23 09:01:31 np0005593293.novalocal cloud-init[1363]: #############################################################
Jan 23 09:01:31 np0005593293.novalocal cloud-init[1310]: Cloud-init v. 24.4-8.el9 finished at Fri, 23 Jan 2026 09:01:31 +0000. Datasource DataSourceConfigDrive [net,ver=2][source=/dev/sr0].  Up 12.75 seconds
Jan 23 09:01:31 np0005593293.novalocal systemd[1]: Finished Cloud-init: Final Stage.

● cloud-init-local.service - Cloud-init: Local Stage (pre-network)
     Loaded: loaded (/usr/lib/systemd/system/cloud-init-local.service; enabled; preset: disabled)
     Active: active (exited) since Fri 2026-01-23 09:01:24 UTC; 1h 40min ago
   Main PID: 776 (code=exited, status=0/SUCCESS)
        CPU: 695ms

Jan 23 09:01:23 localhost systemd[1]: Starting Cloud-init: Local Stage (pre-network)...
Jan 23 09:01:24 localhost cloud-init[836]: Cloud-init v. 24.4-8.el9 running 'init-local' at Fri, 23 Jan 2026 09:01:24 +0000. Up 5.87 seconds.
Jan 23 09:01:24 np0005593293.novalocal systemd[1]: Finished Cloud-init: Local Stage (pre-network).

● cloud-init.service - Cloud-init: Network Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.service; enabled; preset: disabled)
     Active: active (exited) since Fri 2026-01-23 09:01:30 UTC; 1h 39min ago
   Main PID: 898 (code=exited, status=0/SUCCESS)
        CPU: 1.157s

Jan 23 09:01:30 np0005593293.novalocal cloud-init[918]: |+  o+.           |
Jan 23 09:01:30 np0005593293.novalocal cloud-init[918]: |+. .oE           |
Jan 23 09:01:30 np0005593293.novalocal cloud-init[918]: |+.   +   .       |
Jan 23 09:01:30 np0005593293.novalocal cloud-init[918]: |..o o = S        |
Unit display-manager.service could not be found.
Jan 23 09:01:30 np0005593293.novalocal cloud-init[918]: |..o* = + .       |
Jan 23 09:01:30 np0005593293.novalocal cloud-init[918]: |.oo.* . + =      |
Jan 23 09:01:30 np0005593293.novalocal cloud-init[918]: | .o+.+ = = .     |
Jan 23 09:01:30 np0005593293.novalocal cloud-init[918]: |.o.+= +..        |
Jan 23 09:01:30 np0005593293.novalocal cloud-init[918]: +----[SHA256]-----+
Jan 23 09:01:30 np0005593293.novalocal systemd[1]: Finished Cloud-init: Network Stage.

○ cpupower.service - Configure CPU power related settings
     Loaded: loaded (/usr/lib/systemd/system/cpupower.service; disabled; preset: disabled)
     Active: inactive (dead)

● crond.service - Command Scheduler
     Loaded: loaded (/usr/lib/systemd/system/crond.service; enabled; preset: enabled)
     Active: active (running) since Fri 2026-01-23 09:01:30 UTC; 1h 39min ago
   Main PID: 1008 (crond)
         IO: 160.0K read, 0B written
      Tasks: 2 (limit: 48560)
     Memory: 1.5M (peak: 4.3M)
        CPU: 117ms
     CGroup: /system.slice/crond.service
             ├─  1008 /usr/sbin/crond -n
             └─147517 /usr/sbin/anacron -s

Jan 23 09:01:30 np0005593293.novalocal crond[1008]: (CRON) INFO (RANDOM_DELAY will be scaled with factor 50% if used.)
Jan 23 09:01:30 np0005593293.novalocal crond[1008]: (CRON) INFO (running with inotify support)
Jan 23 10:01:01 compute-0 CROND[147492]: (root) CMD (run-parts /etc/cron.hourly)
Jan 23 10:01:01 compute-0 anacron[147517]: Anacron started on 2026-01-23
Jan 23 10:01:01 compute-0 anacron[147517]: Will run job `cron.daily' in 45 min.
Jan 23 10:01:01 compute-0 anacron[147517]: Will run job `cron.weekly' in 65 min.
Jan 23 10:01:01 compute-0 anacron[147517]: Will run job `cron.monthly' in 85 min.
Jan 23 10:01:01 compute-0 anacron[147517]: Jobs will be executed sequentially
Jan 23 10:01:01 compute-0 run-parts[147521]: (/etc/cron.hourly) finished 0anacron
Jan 23 10:01:01 compute-0 CROND[147486]: (root) CMDEND (run-parts /etc/cron.hourly)

● dbus-broker.service - D-Bus System Message Bus
     Loaded: loaded (/usr/lib/systemd/system/dbus-broker.service; enabled; preset: enabled)
     Active: active (running) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
TriggeredBy: ● dbus.socket
       Docs: man:dbus-broker-launch(1)
   Main PID: 765 (dbus-broker-lau)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48560)
     Memory: 2.9M (peak: 3.7M)
        CPU: 6.825s
     CGroup: /system.slice/dbus-broker.service
             ├─765 /usr/bin/dbus-broker-launch --scope system --audit
             └─769 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit

Jan 23 09:41:20 compute-0 dbus-broker-launch[765]: Noticed file-system modification, trigger reload.
Jan 23 09:42:17 compute-0 dbus-broker-launch[769]: avc:  op=load_policy lsm=selinux seqno=7 res=1
Jan 23 09:42:32 compute-0 dbus-broker-launch[769]: avc:  op=load_policy lsm=selinux seqno=8 res=1
Jan 23 10:00:20 compute-0 dbus-broker-launch[769]: avc:  op=load_policy lsm=selinux seqno=9 res=1
Jan 23 10:04:26 compute-0 dbus-broker-launch[769]: avc:  op=load_policy lsm=selinux seqno=10 res=1
Jan 23 10:04:39 compute-0 dbus-broker-launch[769]: avc:  op=load_policy lsm=selinux seqno=11 res=1
Jan 23 10:05:36 compute-0 dbus-broker-launch[769]: avc:  op=load_policy lsm=selinux seqno=12 res=1
Jan 23 10:05:36 compute-0 dbus-broker-launch[765]: Noticed file-system modification, trigger reload.
Jan 23 10:05:36 compute-0 dbus-broker-launch[765]: Noticed file-system modification, trigger reload.
Jan 23 10:07:06 compute-0 dbus-broker-launch[769]: avc:  op=load_policy lsm=selinux seqno=13 res=1

○ dm-event.service - Device-mapper event daemon
     Loaded: loaded (/usr/lib/systemd/system/dm-event.service; static)
     Active: inactive (dead)
TriggeredBy: ● dm-event.socket
       Docs: man:dmeventd(8)

○ dnf-makecache.service - dnf makecache
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.service; static)
     Active: inactive (dead) since Fri 2026-01-23 09:14:51 UTC; 1h 26min ago
TriggeredBy: ● dnf-makecache.timer
   Main PID: 7479 (code=exited, status=0/SUCCESS)
        CPU: 803ms

Jan 23 09:14:49 np0005593293.novalocal systemd[1]: Starting dnf makecache...
Jan 23 09:14:50 np0005593293.novalocal dnf[7479]: Failed determining last makecache time.
Jan 23 09:14:50 np0005593293.novalocal dnf[7479]: CentOS Stream 9 - BaseOS                         50 kB/s | 6.7 kB     00:00
Jan 23 09:14:50 np0005593293.novalocal dnf[7479]: CentOS Stream 9 - AppStream                      68 kB/s | 6.8 kB     00:00
Jan 23 09:14:50 np0005593293.novalocal dnf[7479]: CentOS Stream 9 - CRB                            54 kB/s | 6.6 kB     00:00
Jan 23 09:14:50 np0005593293.novalocal dnf[7479]: CentOS Stream 9 - Extras packages                55 kB/s | 7.3 kB     00:00
Jan 23 09:14:51 np0005593293.novalocal dnf[7479]: Metadata cache created.
Jan 23 09:14:51 np0005593293.novalocal systemd[1]: dnf-makecache.service: Deactivated successfully.
Jan 23 09:14:51 np0005593293.novalocal systemd[1]: Finished dnf makecache.

○ dracut-cmdline.service - dracut cmdline hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-cmdline.service; static)
     Active: inactive (dead) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
   Duration: 1.690s
       Docs: man:dracut-cmdline.service(8)
             man:dracut.bootup(7)
   Main PID: 323 (code=exited, status=0/SUCCESS)
        CPU: 144ms

Jan 23 09:01:20 localhost systemd[1]: Starting dracut cmdline hook...
Jan 23 09:01:20 localhost dracut-cmdline[323]: dracut-9 dracut-057-102.git20250818.el9
Jan 23 09:01:20 localhost dracut-cmdline[323]: Using kernel command line parameters:    BOOT_IMAGE=(hd0,msdos1)/boot/vmlinuz-5.14.0-661.el9.x86_64 root=UUID=22ac9141-3960-4912-b20e-19fc8a328d40 ro console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=1G-2G:192M,2G-64G:256M,64G-:512M
Jan 23 09:01:20 localhost systemd[1]: Finished dracut cmdline hook.
Jan 23 09:01:22 localhost systemd[1]: dracut-cmdline.service: Deactivated successfully.
Jan 23 09:01:22 localhost systemd[1]: Stopped dracut cmdline hook.

○ dracut-initqueue.service - dracut initqueue hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-initqueue.service; static)
     Active: inactive (dead) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
   Duration: 790ms
       Docs: man:dracut-initqueue.service(8)
             man:dracut.bootup(7)
   Main PID: 497 (code=exited, status=0/SUCCESS)
        CPU: 29ms

Jan 23 09:01:20 localhost systemd[1]: Starting dracut initqueue hook...
Jan 23 09:01:21 localhost systemd[1]: Finished dracut initqueue hook.
Jan 23 09:01:22 localhost systemd[1]: dracut-initqueue.service: Deactivated successfully.
Jan 23 09:01:22 localhost systemd[1]: Stopped dracut initqueue hook.

○ dracut-mount.service - dracut mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-mount.service; static)
     Active: inactive (dead) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
   Duration: 153ms
       Docs: man:dracut-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 568 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Jan 23 09:01:21 localhost systemd[1]: Starting dracut mount hook...
Jan 23 09:01:21 localhost systemd[1]: Finished dracut mount hook.
Jan 23 09:01:22 localhost systemd[1]: dracut-mount.service: Deactivated successfully.
Jan 23 09:01:22 localhost systemd[1]: Stopped dracut mount hook.

○ dracut-pre-mount.service - dracut pre-mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-mount.service; static)
     Active: inactive (dead) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
   Duration: 754ms
       Docs: man:dracut-pre-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 546 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Jan 23 09:01:21 localhost systemd[1]: Starting dracut pre-mount hook...
Jan 23 09:01:21 localhost systemd[1]: Finished dracut pre-mount hook.
Jan 23 09:01:22 localhost systemd[1]: dracut-pre-mount.service: Deactivated successfully.
Jan 23 09:01:22 localhost systemd[1]: Stopped dracut pre-mount hook.

○ dracut-pre-pivot.service - dracut pre-pivot and cleanup hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-pivot.service; static)
     Active: inactive (dead) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
   Duration: 33ms
       Docs: man:dracut-pre-pivot.service(8)
             man:dracut.bootup(7)
   Main PID: 574 (code=exited, status=0/SUCCESS)
        CPU: 89ms

Jan 23 09:01:21 localhost systemd[1]: Starting dracut pre-pivot and cleanup hook...
Jan 23 09:01:22 localhost systemd[1]: Finished dracut pre-pivot and cleanup hook.
Jan 23 09:01:22 localhost systemd[1]: dracut-pre-pivot.service: Deactivated successfully.
Jan 23 09:01:22 localhost systemd[1]: Stopped dracut pre-pivot and cleanup hook.

○ dracut-pre-trigger.service - dracut pre-trigger hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-trigger.service; static)
     Active: inactive (dead) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
   Duration: 1.317s
       Docs: man:dracut-pre-trigger.service(8)
             man:dracut.bootup(7)
   Main PID: 463 (code=exited, status=0/SUCCESS)
        CPU: 15ms

Jan 23 09:01:20 localhost systemd[1]: Starting dracut pre-trigger hook...
Jan 23 09:01:20 localhost systemd[1]: Finished dracut pre-trigger hook.
Jan 23 09:01:22 localhost systemd[1]: dracut-pre-trigger.service: Deactivated successfully.
Jan 23 09:01:22 localhost systemd[1]: Stopped dracut pre-trigger hook.

○ dracut-pre-udev.service - dracut pre-udev hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-udev.service; static)
     Active: inactive (dead) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
   Duration: 1.402s
       Docs: man:dracut-pre-udev.service(8)
             man:dracut.bootup(7)
   Main PID: 412 (code=exited, status=0/SUCCESS)
        CPU: 274ms

Jan 23 09:01:20 localhost systemd[1]: Starting dracut pre-udev hook...
Jan 23 09:01:20 localhost rpc.statd[440]: Version 2.5.4 starting
Jan 23 09:01:20 localhost rpc.statd[440]: Initializing NSM state
Jan 23 09:01:20 localhost rpc.idmapd[445]: Setting log level to 0
Jan 23 09:01:20 localhost systemd[1]: Finished dracut pre-udev hook.
Jan 23 09:01:22 localhost rpc.idmapd[445]: exiting on signal 15
Jan 23 09:01:22 localhost systemd[1]: dracut-pre-udev.service: Deactivated successfully.
Jan 23 09:01:22 localhost systemd[1]: Stopped dracut pre-udev hook.

○ dracut-shutdown-onfailure.service - Service executing upon dracut-shutdown failure to perform cleanup
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown-onfailure.service; static)
     Active: inactive (dead)
       Docs: man:dracut-shutdown.service(8)

● dracut-shutdown.service - Restore /run/initramfs on shutdown
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown.service; static)
     Active: active (exited) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:dracut-shutdown.service(8)
   Main PID: 777 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Jan 23 09:01:23 localhost systemd[1]: Starting Restore /run/initramfs on shutdown...
Jan 23 09:01:23 localhost systemd[1]: Finished Restore /run/initramfs on shutdown.

● edpm-container-shutdown.service - EDPM Container Shutdown
     Loaded: loaded (/etc/systemd/system/edpm-container-shutdown.service; enabled; preset: enabled)
     Active: active (exited) since Fri 2026-01-23 09:44:51 UTC; 56min ago
       Docs: https://github.com/openstack-k8s-operators/docs
   Main PID: 61428 (code=exited, status=0/SUCCESS)
        CPU: 2ms

Jan 23 09:44:51 compute-0 systemd[1]: Starting EDPM Container Shutdown...
Jan 23 09:44:51 compute-0 systemd[1]: Finished EDPM Container Shutdown.

○ edpm_libvirt_guests.service - Suspend libvirt Guests in edpm
     Loaded: loaded (/etc/systemd/system/edpm_libvirt_guests.service; enabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:libvirtd(8)
             https://libvirt.org

● edpm_nova_compute.service - nova_compute container
     Loaded: loaded (/etc/systemd/system/edpm_nova_compute.service; enabled; preset: disabled)
     Active: active (running) since Fri 2026-01-23 10:10:54 UTC; 30min ago
    Process: 249214 ExecStart=/usr/bin/podman start nova_compute (code=exited, status=0/SUCCESS)
   Main PID: 249229 (conmon)
         IO: 0B read, 101.0K written
      Tasks: 1 (limit: 48560)
     Memory: 684.0K (peak: 16.6M)
        CPU: 542ms
     CGroup: /system.slice/edpm_nova_compute.service
             └─249229 /usr/bin/conmon --api-version 1 -c 955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f -u 955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f/userdata -p /run/containers/storage/overlay-containers/955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f

Jan 23 10:41:19 compute-0 nova_compute[249229]: 2026-01-23 10:41:19.134 249233 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Jan 23 10:41:21 compute-0 nova_compute[249229]: 2026-01-23 10:41:21.717 249233 DEBUG oslo_service.periodic_task [None req-0623b36c-8378-484d-a465-75209495a966 - - - - - -] Running periodic task ComputeManager._heal_instance_info_cache run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Jan 23 10:41:21 compute-0 nova_compute[249229]: 2026-01-23 10:41:21.718 249233 DEBUG nova.compute.manager [None req-0623b36c-8378-484d-a465-75209495a966 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858[00m
Jan 23 10:41:21 compute-0 nova_compute[249229]: 2026-01-23 10:41:21.718 249233 DEBUG nova.compute.manager [None req-0623b36c-8378-484d-a465-75209495a966 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862[00m
Jan 23 10:41:21 compute-0 nova_compute[249229]: 2026-01-23 10:41:21.963 249233 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Jan 23 10:41:22 compute-0 nova_compute[249229]: 2026-01-23 10:41:22.106 249233 DEBUG nova.compute.manager [None req-0623b36c-8378-484d-a465-75209495a966 - - - - - -] Didn't find any instances for network info cache update. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9944[00m
Jan 23 10:41:22 compute-0 nova_compute[249229]: 2026-01-23 10:41:22.716 249233 DEBUG oslo_service.periodic_task [None req-0623b36c-8378-484d-a465-75209495a966 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Jan 23 10:41:22 compute-0 nova_compute[249229]: 2026-01-23 10:41:22.717 249233 DEBUG nova.compute.manager [None req-0623b36c-8378-484d-a465-75209495a966 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477[00m
Jan 23 10:41:24 compute-0 nova_compute[249229]: 2026-01-23 10:41:24.136 249233 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Jan 23 10:41:24 compute-0 nova_compute[249229]: 2026-01-23 10:41:24.717 249233 DEBUG oslo_service.periodic_task [None req-0623b36c-8378-484d-a465-75209495a966 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m

● edpm_ovn_controller.service - ovn_controller container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_controller.service; enabled; preset: disabled)
     Active: active (running) since Fri 2026-01-23 10:01:36 UTC; 39min ago
   Main PID: 151634 (conmon)
         IO: 0B read, 152.5K written
      Tasks: 1 (limit: 48560)
     Memory: 692.0K (peak: 18.0M)
        CPU: 301ms
     CGroup: /system.slice/edpm_ovn_controller.service
             └─151634 /usr/bin/conmon --api-version 1 -c ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d -u ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d/userdata -p /run/containers/storage/overlay-containers/ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d

Jan 23 10:28:13 compute-0 ovn_controller[151634]: 2026-01-23T10:28:13Z|00067|binding|INFO|Releasing lport 5cbba0a0-5f58-4d90-8d1c-814aceb1262d from this chassis (sb_readonly=0)
Jan 23 10:28:31 compute-0 ovn_controller[151634]: 2026-01-23T10:28:31Z|00008|pinctrl(ovn_pinctrl0)|INFO|DHCPOFFER fa:16:3e:07:d3:68 10.100.0.3
Jan 23 10:28:31 compute-0 ovn_controller[151634]: 2026-01-23T10:28:31Z|00009|pinctrl(ovn_pinctrl0)|INFO|DHCPACK fa:16:3e:07:d3:68 10.100.0.3
Jan 23 10:28:37 compute-0 ovn_controller[151634]: 2026-01-23T10:28:37Z|00068|binding|INFO|Releasing lport 5cbba0a0-5f58-4d90-8d1c-814aceb1262d from this chassis (sb_readonly=0)
Jan 23 10:28:37 compute-0 ovn_controller[151634]: 2026-01-23T10:28:37Z|00069|binding|INFO|Releasing lport 5cbba0a0-5f58-4d90-8d1c-814aceb1262d from this chassis (sb_readonly=0)
Jan 23 10:28:39 compute-0 ovn_controller[151634]: 2026-01-23T10:28:39Z|00070|binding|INFO|Releasing lport 5cbba0a0-5f58-4d90-8d1c-814aceb1262d from this chassis (sb_readonly=0)
Jan 23 10:28:40 compute-0 ovn_controller[151634]: 2026-01-23T10:28:40Z|00071|binding|INFO|Releasing lport 5775c66b-2d08-4c9d-83fe-d4c692e19472 from this chassis (sb_readonly=0)
Jan 23 10:28:40 compute-0 ovn_controller[151634]: 2026-01-23T10:28:40Z|00072|binding|INFO|Setting lport 5775c66b-2d08-4c9d-83fe-d4c692e19472 down in Southbound
Jan 23 10:28:40 compute-0 ovn_controller[151634]: 2026-01-23T10:28:40Z|00073|binding|INFO|Removing iface tap5775c66b-2d ovn-installed in OVS
Jan 23 10:29:22 compute-0 ovn_controller[151634]: 2026-01-23T10:29:22Z|00074|memory_trim|INFO|Detected inactivity (last active 30012 ms ago): trimming memory

● edpm_ovn_metadata_agent.service - ovn_metadata_agent container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_metadata_agent.service; enabled; preset: disabled)
     Active: active (running) since Fri 2026-01-23 10:02:57 UTC; 38min ago
   Main PID: 161916 (conmon)
         IO: 0B read, 107.0K written
      Tasks: 1 (limit: 48560)
     Memory: 716.0K (peak: 20.3M)
        CPU: 307ms
     CGroup: /system.slice/edpm_ovn_metadata_agent.service
             └─161916 /usr/bin/conmon --api-version 1 -c 7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d -u 7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d/userdata -p /run/containers/storage/overlay-containers/7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d

Jan 23 10:37:59 compute-0 ovn_metadata_agent[161916]: 2026-01-23 10:37:59.795 161921 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Unit fcoe.service could not be found.
Jan 23 10:38:59 compute-0 ovn_metadata_agent[161916]: 2026-01-23 10:38:59.795 161921 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Jan 23 10:38:59 compute-0 ovn_metadata_agent[161916]: 2026-01-23 10:38:59.796 161921 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Jan 23 10:38:59 compute-0 ovn_metadata_agent[161916]: 2026-01-23 10:38:59.796 161921 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Jan 23 10:39:59 compute-0 ovn_metadata_agent[161916]: 2026-01-23 10:39:59.797 161921 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Jan 23 10:39:59 compute-0 ovn_metadata_agent[161916]: 2026-01-23 10:39:59.797 161921 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Jan 23 10:39:59 compute-0 ovn_metadata_agent[161916]: 2026-01-23 10:39:59.797 161921 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Jan 23 10:40:59 compute-0 ovn_metadata_agent[161916]: 2026-01-23 10:40:59.798 161921 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Jan 23 10:40:59 compute-0 ovn_metadata_agent[161916]: 2026-01-23 10:40:59.799 161921 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Jan 23 10:40:59 compute-0 ovn_metadata_agent[161916]: 2026-01-23 10:40:59.799 161921 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m

○ emergency.service - Emergency Shell
     Loaded: loaded (/usr/lib/systemd/system/emergency.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

○ ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d-47444066448a439e.service - /usr/bin/podman healthcheck run ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d
     Loaded: loaded (/run/systemd/transient/ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d-47444066448a439e.service; transient)
  Transient: yes
     Active: inactive (dead) since Fri 2026-01-23 10:41:21 UTC; 5s ago
   Duration: 208ms
TriggeredBy: ● ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d-47444066448a439e.timer
    Process: 296590 ExecStart=/usr/bin/podman healthcheck run ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d (code=exited, status=0/SUCCESS)
   Main PID: 296590 (code=exited, status=0/SUCCESS)
        CPU: 180ms

Unit hv_kvp_daemon.service could not be found.
Jan 23 10:41:20 compute-0 podman[296590]: 2026-01-23 10:41:20.951425116 +0000 UTC m=+0.089930538 container health_status ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d (image=quay.io/podified-antelope-centos9/openstack-ovn-controller@sha256:fa24ce4aa285e3632c86a53e8d0385d4c788d049da42dd06570ad9d44aae00de, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251202, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, config_id=ovn_controller, container_name=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': 'cdc8d10f0e05d8a70b43cf26938a886cf76be4340fa6a898edc4cc90e10001b1-99269140098de15b48680c41e5313433c184a4380a28a4d66e6de0ece8f46703-99269140098de15b48680c41e5313433c184a4380a28a4d66e6de0ece8f46703-99269140098de15b48680c41e5313433c184a4380a28a4d66e6de0ece8f46703'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller@sha256:fa24ce4aa285e3632c86a53e8d0385d4c788d049da42dd06570ad9d44aae00de', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, io.buildah.version=1.41.3)

● getty@tty1.service - Getty on tty1
     Loaded: loaded (/usr/lib/systemd/system/getty@.service; enabled; preset: enabled)
     Active: active (running) since Fri 2026-01-23 09:01:30 UTC; 1h 39min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1009 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 276.0K (peak: 756.0K)
        CPU: 10ms
     CGroup: /system.slice/system-getty.slice/getty@tty1.service
             └─1009 /sbin/agetty -o "-p -- \\u" --noclear - linux

Jan 23 09:01:30 np0005593293.novalocal systemd[1]: Started Getty on tty1.

● gssproxy.service - GSSAPI Proxy Daemon
     Loaded: loaded (/usr/lib/systemd/system/gssproxy.service; disabled; preset: disabled)
     Active: active (running) since Fri 2026-01-23 09:01:24 UTC; 1h 40min ago
   Main PID: 868 (gssproxy)
         IO: 0B read, 0B written
      Tasks: 6 (limit: 48560)
     Memory: 1.8M (peak: 2.9M)
        CPU: 23ms
     CGroup: /system.slice/gssproxy.service
             └─868 /usr/sbin/gssproxy -D

Jan 23 09:01:24 np0005593293.novalocal systemd[1]: Starting GSSAPI Proxy Daemon...
Jan 23 09:01:24 np0005593293.novalocal systemd[1]: Started GSSAPI Proxy Daemon.

○ import-state.service - Import network configuration from initramfs
     Loaded: loaded (/usr/lib/systemd/system/import-state.service; enabled; preset: enabled)
     Active: inactive (dead)

○ initrd-cleanup.service - Cleaning Up and Shutting Down Daemons
     Loaded: loaded (/usr/lib/systemd/system/initrd-cleanup.service; static)
     Active: inactive (dead) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
   Main PID: 616 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Jan 23 09:01:22 localhost systemd[1]: Starting Cleaning Up and Shutting Down Daemons...
Jan 23 09:01:22 localhost systemd[1]: initrd-cleanup.service: Deactivated successfully.
Jan 23 09:01:22 localhost systemd[1]: Finished Cleaning Up and Shutting Down Daemons.

○ initrd-parse-etc.service - Mountpoints Configured in the Real Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-parse-etc.service; static)
     Active: inactive (dead) since Fri 2026-01-23 09:01:21 UTC; 1h 40min ago
   Main PID: 567 (code=exited, status=0/SUCCESS)
        CPU: 15ms

Jan 23 09:01:21 localhost systemd[1]: Starting Mountpoints Configured in the Real Root...
Jan 23 09:01:21 localhost systemd[1]: initrd-parse-etc.service: Deactivated successfully.
Jan 23 09:01:21 localhost systemd[1]: Finished Mountpoints Configured in the Real Root.

○ initrd-switch-root.service - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.service; static)
     Active: inactive (dead) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
   Main PID: 619 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Jan 23 09:01:22 localhost systemd[1]: Starting Switch Root...

○ initrd-udevadm-cleanup-db.service - Cleanup udev Database
     Loaded: loaded (/usr/lib/systemd/system/initrd-udevadm-cleanup-db.service; static)
     Active: inactive (dead) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
   Main PID: 617 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Jan 23 09:01:22 localhost systemd[1]: Starting Cleanup udev Database...
Jan 23 09:01:22 localhost systemd[1]: initrd-udevadm-cleanup-db.service: Deactivated successfully.
Jan 23 09:01:22 localhost systemd[1]: Finished Cleanup udev Database.

○ ip6tables.service - IPv6 firewall with ip6tables
     Loaded: loaded (/usr/lib/systemd/system/ip6tables.service; disabled; preset: disabled)
     Active: inactive (dead)

○ iptables.service - IPv4 firewall with iptables
     Loaded: loaded (/usr/lib/systemd/system/iptables.service; disabled; preset: disabled)
     Active: inactive (dead) since Fri 2026-01-23 09:45:02 UTC; 56min ago
   Duration: 43min 37.768s
   Main PID: 778 (code=exited, status=0/SUCCESS)
        CPU: 108ms

Jan 23 09:01:23 localhost systemd[1]: Starting IPv4 firewall with iptables...
Jan 23 09:01:24 localhost iptables.init[778]: iptables: Applying firewall rules: [  OK  ]
Jan 23 09:01:24 localhost systemd[1]: Finished IPv4 firewall with iptables.
Jan 23 09:45:01 compute-0 systemd[1]: Stopping IPv4 firewall with iptables...
Jan 23 09:45:02 compute-0 iptables.init[62678]: iptables: Setting chains to policy ACCEPT: raw mangle filter nat [  OK  ]
Jan 23 09:45:02 compute-0 iptables.init[62678]: iptables: Flushing firewall rules: [  OK  ]
Jan 23 09:45:02 compute-0 systemd[1]: iptables.service: Deactivated successfully.
Jan 23 09:45:02 compute-0 systemd[1]: Stopped IPv4 firewall with iptables.

● irqbalance.service - irqbalance daemon
     Loaded: loaded (/usr/lib/systemd/system/irqbalance.service; enabled; preset: enabled)
     Active: active (running) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:irqbalance(1)
             https://github.com/Irqbalance/irqbalance
   Main PID: 779 (irqbalance)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48560)
     Memory: 1.1M (peak: 1.4M)
        CPU: 531ms
     CGroup: /system.slice/irqbalance.service
             └─779 /usr/sbin/irqbalance

Jan 23 09:01:34 np0005593293.novalocal irqbalance[779]: Cannot change IRQ 28 affinity: Operation not permitted
Jan 23 09:01:34 np0005593293.novalocal irqbalance[779]: IRQ 28 affinity is now unmanaged
Jan 23 09:01:34 np0005593293.novalocal irqbalance[779]: Cannot change IRQ 34 affinity: Operation not permitted
Jan 23 09:01:34 np0005593293.novalocal irqbalance[779]: IRQ 34 affinity is now unmanaged
Jan 23 09:01:34 np0005593293.novalocal irqbalance[779]: Cannot change IRQ 32 affinity: Operation not permitted
Jan 23 09:01:34 np0005593293.novalocal irqbalance[779]: IRQ 32 affinity is now unmanaged
Jan 23 09:01:34 np0005593293.novalocal irqbalance[779]: Cannot change IRQ 30 affinity: Operation not permitted
Jan 23 09:01:34 np0005593293.novalocal irqbalance[779]: IRQ 30 affinity is now unmanaged
Jan 23 09:01:34 np0005593293.novalocal irqbalance[779]: Cannot change IRQ 29 affinity: Operation not permitted
Jan 23 09:01:34 np0005593293.novalocal irqbalance[779]: IRQ 29 affinity is now unmanaged

○ iscsi-init.service - One time configuration for iscsi.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-init.service; disabled; preset: disabled)
     Active: inactive (dead)
  Condition: start condition failed at Fri 2026-01-23 10:09:01 UTC; 32min ago

Jan 23 10:08:20 compute-0 systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).
Jan 23 10:09:01 compute-0 systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).

○ iscsi-onboot.service - Special handling of early boot iSCSI sessions
     Loaded: loaded (/usr/lib/systemd/system/iscsi-onboot.service; enabled; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsi-shutdown.service - Logout off all iSCSI sessions on shutdown
     Loaded: loaded (/usr/lib/systemd/system/iscsi-shutdown.service; static)
     Active: active (exited) since Fri 2026-01-23 10:08:20 UTC; 33min ago
       Docs: man:iscsid(8)
             man:iscsiadm(8)
   Main PID: 226501 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Jan 23 10:08:20 compute-0 systemd[1]: Starting Logout off all iSCSI sessions on shutdown...
Jan 23 10:08:20 compute-0 systemd[1]: Finished Logout off all iSCSI sessions on shutdown.

○ iscsi-starter.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-starter.service; enabled; preset: disabled)
     Active: inactive (dead)

○ iscsi.service - Login and scanning of iSCSI devices
     Loaded: loaded (/usr/lib/systemd/system/iscsi.service; indirect; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsid.service - Open-iSCSI
     Loaded: loaded (/usr/lib/systemd/system/iscsid.service; enabled; preset: disabled)
     Active: active (running) since Fri 2026-01-23 10:09:01 UTC; 32min ago
TriggeredBy: ● iscsid.socket
       Docs: man:iscsid(8)
             man:iscsiuio(8)
             man:iscsiadm(8)
   Main PID: 233083 (iscsid)
     Status: "Ready to process requests"
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 1.9M (peak: 2.0M)
        CPU: 9ms
     CGroup: /system.slice/iscsid.service
             └─233083 /usr/sbin/iscsid -f

Jan 23 10:09:01 compute-0 systemd[1]: Starting Open-iSCSI...
Jan 23 10:09:01 compute-0 systemd[1]: Started Open-iSCSI.

○ iscsiuio.service - iSCSI UserSpace I/O driver
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ iscsiuio.socket
       Docs: man:iscsiuio(8)

● kdump.service - Crash recovery kernel arming
     Loaded: loaded (/usr/lib/systemd/system/kdump.service; enabled; preset: enabled)
     Active: active (exited) since Fri 2026-01-23 09:01:42 UTC; 1h 39min ago
   Main PID: 1006 (code=exited, status=0/SUCCESS)
        CPU: 18.250s

Jan 23 09:01:41 np0005593293.novalocal dracut[1284]: Linked:         0 files
Jan 23 09:01:41 np0005593293.novalocal dracut[1284]: Compared:       0 xattrs
Jan 23 09:01:41 np0005593293.novalocal dracut[1284]: Compared:       0 files
Jan 23 09:01:41 np0005593293.novalocal dracut[1284]: Saved:          0 B
Jan 23 09:01:41 np0005593293.novalocal dracut[1284]: Duration:       0.000595 seconds
Jan 23 09:01:41 np0005593293.novalocal dracut[1284]: *** Hardlinking files done ***
Jan 23 09:01:41 np0005593293.novalocal dracut[1284]: *** Creating initramfs image file '/boot/initramfs-5.14.0-661.el9.x86_64kdump.img' done ***
Jan 23 09:01:42 np0005593293.novalocal kdumpctl[1013]: kdump: kexec: loaded kdump kernel
Jan 23 09:01:42 np0005593293.novalocal kdumpctl[1013]: kdump: Starting kdump: [OK]
Jan 23 09:01:42 np0005593293.novalocal systemd[1]: Finished Crash recovery kernel arming.

● kmod-static-nodes.service - Create List of Static Device Nodes
     Loaded: loaded (/usr/lib/systemd/system/kmod-static-nodes.service; static)
     Active: active (exited) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
Unit lvm2-activation-early.service could not be found.
   Main PID: 669 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Jan 23 09:01:23 localhost systemd[1]: Finished Create List of Static Device Nodes.

● ldconfig.service - Rebuild Dynamic Linker Cache
     Loaded: loaded (/usr/lib/systemd/system/ldconfig.service; static)
     Active: active (exited) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:ldconfig(8)
   Main PID: 692 (code=exited, status=0/SUCCESS)
        CPU: 62ms

Jan 23 09:01:23 localhost systemd[1]: Starting Rebuild Dynamic Linker Cache...
Jan 23 09:01:23 localhost systemd[1]: Finished Rebuild Dynamic Linker Cache.

○ libvirtd.service
     Loaded: masked (Reason: Unit libvirtd.service is masked.)
     Active: inactive (dead)
TriggeredBy: ○ libvirtd-ro.socket
             ○ libvirtd.socket
             ○ libvirtd-admin.socket

○ loadmodules.service - Load legacy module configuration
     Loaded: loaded (/usr/lib/systemd/system/loadmodules.service; enabled; preset: enabled)
     Active: inactive (dead)

○ logrotate.service - Rotate log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.service; static)
     Active: inactive (dead)
TriggeredBy: ● logrotate.timer
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

○ lvm2-lvmpolld.service - LVM2 poll daemon
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.service; static)
     Active: inactive (dead)
TriggeredBy: ● lvm2-lvmpolld.socket
       Docs: man:lvmpolld(8)

● lvm2-monitor.service - Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling
     Loaded: loaded (/usr/lib/systemd/system/lvm2-monitor.service; enabled; preset: enabled)
     Active: active (exited) since Fri 2026-01-23 09:39:04 UTC; 1h 2min ago
       Docs: man:dmeventd(8)
             man:lvcreate(8)
             man:lvchange(8)
             man:vgchange(8)
   Main PID: 33941 (code=exited, status=0/SUCCESS)
        CPU: 25ms

Jan 23 09:39:04 compute-0 systemd[1]: Starting Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling...
Jan 23 09:39:04 compute-0 systemd[1]: Finished Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling.

○ mdmonitor.service - Software RAID monitoring and management
     Loaded: loaded (/usr/lib/systemd/system/mdmonitor.service; enabled; preset: enabled)
     Active: inactive (dead)

○ microcode.service - Load CPU microcode update
     Loaded: loaded (/usr/lib/systemd/system/microcode.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Fri 2026-01-23 09:01:23 UTC; 1h 40min ago

Jan 23 09:01:23 localhost systemd[1]: Load CPU microcode update was skipped because of an unmet condition check (ConditionPathExists=/sys/devices/system/cpu/microcode/reload).

○ modprobe@configfs.service - Load Kernel Module configfs
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:modprobe(8)
   Main PID: 767 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Jan 23 09:01:23 localhost systemd[1]: Starting Load Kernel Module configfs...
Jan 23 09:01:23 localhost systemd[1]: modprobe@configfs.service: Deactivated successfully.
Jan 23 09:01:23 localhost systemd[1]: Finished Load Kernel Module configfs.

○ modprobe@drm.service - Load Kernel Module drm
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:modprobe(8)
   Main PID: 671 (code=exited, status=0/SUCCESS)
        CPU: 108ms

Jan 23 09:01:23 localhost systemd[1]: modprobe@drm.service: Deactivated successfully.
Jan 23 09:01:23 localhost systemd[1]: Finished Load Kernel Module drm.

○ modprobe@efi_pstore.service - Load Kernel Module efi_pstore
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:modprobe(8)
   Main PID: 672 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Jan 23 09:01:23 localhost systemd[1]: modprobe@efi_pstore.service: Deactivated successfully.
Jan 23 09:01:23 localhost systemd[1]: Finished Load Kernel Module efi_pstore.

○ modprobe@fuse.service - Load Kernel Module fuse
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:modprobe(8)
   Main PID: 673 (code=exited, status=0/SUCCESS)
        CPU: 23ms

Jan 23 09:01:23 localhost systemd[1]: modprobe@fuse.service: Deactivated successfully.
Jan 23 09:01:23 localhost systemd[1]: Finished Load Kernel Module fuse.

● multipathd.service - Device-Mapper Multipath Device Controller
     Loaded: loaded (/usr/lib/systemd/system/multipathd.service; enabled; preset: enabled)
     Active: active (running) since Fri 2026-01-23 10:09:03 UTC; 32min ago
TriggeredBy: ● multipathd.socket
   Main PID: 233315 (multipathd)
     Status: "up"
         IO: 0B read, 0B written
      Tasks: 7
     Memory: 18.4M (peak: 18.9M)
        CPU: 336ms
     CGroup: /system.slice/multipathd.service
             └─233315 /sbin/multipathd -d -s

Jan 23 10:09:03 compute-0 systemd[1]: Starting Device-Mapper Multipath Device Controller...
Jan 23 10:09:03 compute-0 multipathd[233315]: --------start up--------
Jan 23 10:09:03 compute-0 multipathd[233315]: read /etc/multipath.conf
Jan 23 10:09:03 compute-0 multipathd[233315]: path checkers start up
Jan 23 10:09:03 compute-0 systemd[1]: Started Device-Mapper Multipath Device Controller.

○ netns-placeholder.service - Create netns directory
     Loaded: loaded (/etc/systemd/system/netns-placeholder.service; enabled; preset: enabled)
     Active: inactive (dead) since Fri 2026-01-23 10:02:22 UTC; 39min ago
   Main PID: 158971 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Jan 23 10:02:22 compute-0 systemd[1]: Starting Create netns directory...
Jan 23 10:02:22 compute-0 systemd[1]: netns-placeholder.service: Deactivated successfully.
Jan 23 10:02:22 compute-0 systemd[1]: Finished Create netns directory.

○ network.service - LSB: Bring up/down networking
     Loaded: loaded (/etc/rc.d/init.d/network; generated)
     Active: inactive (dead)
       Docs: man:systemd-sysv-generator(8)

● NetworkManager-wait-online.service - Network Manager Wait Online
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager-wait-online.service; enabled; preset: disabled)
     Active: active (exited) since Fri 2026-01-23 09:42:45 UTC; 58min ago
       Docs: man:NetworkManager-wait-online.service(8)
   Main PID: 48886 (code=exited, status=0/SUCCESS)
        CPU: 21ms

Jan 23 09:42:45 compute-0 systemd[1]: Starting Network Manager Wait Online...
Jan 23 09:42:45 compute-0 systemd[1]: Finished Network Manager Wait Online.

● NetworkManager.service - Network Manager
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager.service; enabled; preset: enabled)
    Drop-In: /usr/lib/systemd/system/NetworkManager.service.d
             └─NetworkManager-ovs.conf
     Active: active (running) since Fri 2026-01-23 09:42:45 UTC; 58min ago
       Docs: man:NetworkManager(8)
   Main PID: 48866 (NetworkManager)
         IO: 104.0K read, 230.5K written
      Tasks: 3 (limit: 48560)
     Memory: 5.5M (peak: 6.6M)
        CPU: 28.820s
     CGroup: /system.slice/NetworkManager.service
             └─48866 /usr/sbin/NetworkManager --no-daemon

Jan 23 10:28:05 compute-0 NetworkManager[48866]: <info>  [1769164085.1511] device (tap5775c66b-2d): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Jan 23 10:28:05 compute-0 NetworkManager[48866]: <info>  [1769164085.1521] device (tap5775c66b-2d): state change: unavailable -> disconnected (reason 'none', managed-type: 'external')
Jan 23 10:28:05 compute-0 NetworkManager[48866]: <info>  [1769164085.2148] manager: (tap64d8458c-f0): new Veth device (/org/freedesktop/NetworkManager/Devices/45)
Jan 23 10:28:05 compute-0 NetworkManager[48866]: <info>  [1769164085.2712] device (tap64d8458c-f0): carrier: link connected
Unit ntpd.service could not be found.
Unit ntpdate.service could not be found.
Jan 23 10:28:05 compute-0 NetworkManager[48866]: <info>  [1769164085.4425] manager: (tap64d8458c-f0): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/46)
Jan 23 10:28:13 compute-0 NetworkManager[48866]: <info>  [1769164093.1207] manager: (patch-provnet-995e8c2d-ca55-405c-bf26-97e408875e42-to-br-int): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/47)
Jan 23 10:28:13 compute-0 NetworkManager[48866]: <info>  [1769164093.1216] manager: (patch-br-int-to-provnet-995e8c2d-ca55-405c-bf26-97e408875e42): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/48)
Jan 23 10:28:39 compute-0 NetworkManager[48866]: <info>  [1769164119.4546] manager: (patch-provnet-995e8c2d-ca55-405c-bf26-97e408875e42-to-br-int): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/49)
Jan 23 10:28:39 compute-0 NetworkManager[48866]: <info>  [1769164119.4560] manager: (patch-br-int-to-provnet-995e8c2d-ca55-405c-bf26-97e408875e42): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/50)
Jan 23 10:28:40 compute-0 NetworkManager[48866]: <info>  [1769164120.9145] device (tap5775c66b-2d): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')

○ nfs-idmapd.service - NFSv4 ID-name mapping service
     Loaded: loaded (/usr/lib/systemd/system/nfs-idmapd.service; static)
     Active: inactive (dead)
       Docs: man:idmapd(8)

○ nfs-mountd.service - NFS Mount Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfs-mountd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.mountd(8)

○ nfs-server.service - NFS server and services
     Loaded: loaded (/usr/lib/systemd/system/nfs-server.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:rpc.nfsd(8)
             man:exportfs(8)

○ nfs-utils.service - NFS server and client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-utils.service; static)
     Active: inactive (dead)

○ nfsdcld.service - NFSv4 Client Tracking Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfsdcld.service; static)
     Active: inactive (dead)
       Docs: man:nfsdcld(8)

● nftables.service - Netfilter Tables
     Loaded: loaded (/usr/lib/systemd/system/nftables.service; enabled; preset: disabled)
     Active: active (exited) since Fri 2026-01-23 09:45:04 UTC; 56min ago
       Docs: man:nft(8)
   Main PID: 63069 (code=exited, status=0/SUCCESS)
        CPU: 19ms

Jan 23 09:45:04 compute-0 systemd[1]: Starting Netfilter Tables...
Jan 23 09:45:04 compute-0 systemd[1]: Finished Netfilter Tables.

● nis-domainname.service - Read and set NIS domainname from /etc/sysconfig/network
     Loaded: loaded (/usr/lib/systemd/system/nis-domainname.service; enabled; preset: enabled)
     Active: active (exited) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
   Main PID: 674 (code=exited, status=0/SUCCESS)
        CPU: 5ms

Jan 23 09:01:23 localhost systemd[1]: Finished Read and set NIS domainname from /etc/sysconfig/network.

○ nvmefc-boot-connections.service - Auto-connect to subsystems on FC-NVME devices found during boot
     Loaded: loaded (/usr/lib/systemd/system/nvmefc-boot-connections.service; enabled; preset: enabled)
     Active: inactive (dead)

● openvswitch.service - Open vSwitch
     Loaded: loaded (/usr/lib/systemd/system/openvswitch.service; enabled; preset: disabled)
     Active: active (exited) since Fri 2026-01-23 09:42:26 UTC; 59min ago
   Main PID: 47180 (code=exited, status=0/SUCCESS)
        CPU: 2ms

Jan 23 09:42:26 compute-0 systemd[1]: Starting Open vSwitch...
Jan 23 09:42:26 compute-0 systemd[1]: Finished Open vSwitch.

● ovs-delete-transient-ports.service - Open vSwitch Delete Transient Ports
     Loaded: loaded (/usr/lib/systemd/system/ovs-delete-transient-ports.service; static)
     Active: active (exited) since Fri 2026-01-23 09:42:26 UTC; 59min ago
   Main PID: 47118 (code=exited, status=0/SUCCESS)
        CPU: 31ms

Jan 23 09:42:26 compute-0 systemd[1]: Starting Open vSwitch Delete Transient Ports...
Unit pacemaker.service could not be found.
Unit plymouth-quit-wait.service could not be found.
Unit plymouth-start.service could not be found.
Jan 23 09:42:26 compute-0 systemd[1]: Finished Open vSwitch Delete Transient Ports.

● ovs-vswitchd.service - Open vSwitch Forwarding Unit
     Loaded: loaded (/usr/lib/systemd/system/ovs-vswitchd.service; static)
     Active: active (running) since Fri 2026-01-23 09:42:26 UTC; 59min ago
   Main PID: 47171 (ovs-vswitchd)
         IO: 3.4M read, 140.0K written
      Tasks: 13 (limit: 48560)
     Memory: 244.0M (peak: 248.2M)
        CPU: 10.461s
     CGroup: /system.slice/ovs-vswitchd.service
             └─47171 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach

Jan 23 09:42:26 compute-0 systemd[1]: Starting Open vSwitch Forwarding Unit...
Jan 23 09:42:26 compute-0 ovs-ctl[47161]: Inserting openvswitch module [  OK  ]
Jan 23 09:42:26 compute-0 ovs-ctl[47130]: Starting ovs-vswitchd [  OK  ]
Jan 23 09:42:26 compute-0 ovs-vsctl[47178]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0
Jan 23 09:42:26 compute-0 ovs-ctl[47130]: Enabling remote OVSDB managers [  OK  ]
Jan 23 09:42:26 compute-0 systemd[1]: Started Open vSwitch Forwarding Unit.

● ovsdb-server.service - Open vSwitch Database Unit
     Loaded: loaded (/usr/lib/systemd/system/ovsdb-server.service; static)
     Active: active (running) since Fri 2026-01-23 09:42:26 UTC; 59min ago
   Main PID: 47090 (ovsdb-server)
         IO: 1.2M read, 262.5K written
      Tasks: 1 (limit: 48560)
     Memory: 4.8M (peak: 39.3M)
        CPU: 12.142s
     CGroup: /system.slice/ovsdb-server.service
             └─47090 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach

Jan 23 09:42:25 compute-0 chown[47037]: /usr/bin/chown: cannot access '/run/openvswitch': No such file or directory
Jan 23 09:42:25 compute-0 ovs-ctl[47042]: /etc/openvswitch/conf.db does not exist ... (warning).
Jan 23 09:42:25 compute-0 ovs-ctl[47042]: Creating empty database /etc/openvswitch/conf.db [  OK  ]
Jan 23 09:42:25 compute-0 ovs-ctl[47042]: Starting ovsdb-server [  OK  ]
Jan 23 09:42:25 compute-0 ovs-vsctl[47091]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait -- init -- set Open_vSwitch . db-version=8.5.1
Jan 23 09:42:26 compute-0 ovs-vsctl[47110]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait set Open_vSwitch . ovs-version=3.3.5-115.el9s "external-ids:system-id=\"57e418b8-f514-4483-8675-f32d2dcd8cea\"" "external-ids:rundir=\"/var/run/openvswitch\"" "system-type=\"centos\"" "system-version=\"9\""
Jan 23 09:42:26 compute-0 ovs-ctl[47042]: Configuring Open vSwitch system IDs [  OK  ]
Jan 23 09:42:26 compute-0 ovs-vsctl[47117]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0
Jan 23 09:42:26 compute-0 ovs-ctl[47042]: Enabling remote OVSDB managers [  OK  ]
Jan 23 09:42:26 compute-0 systemd[1]: Started Open vSwitch Database Unit.

● polkit.service - Authorization Manager
     Loaded: loaded (/usr/lib/systemd/system/polkit.service; static)
     Active: active (running) since Fri 2026-01-23 09:41:26 UTC; 1h 0min ago
       Docs: man:polkit(8)
   Main PID: 43358 (polkitd)
         IO: 11.2M read, 0B written
      Tasks: 12 (limit: 48560)
     Memory: 17.0M (peak: 17.8M)
        CPU: 2.084s
     CGroup: /system.slice/polkit.service
             └─43358 /usr/lib/polkit-1/polkitd --no-debug

Jan 23 10:05:40 compute-0 polkitd[43358]: Collecting garbage unconditionally...
Jan 23 10:05:40 compute-0 polkitd[43358]: Loading rules from directory /etc/polkit-1/rules.d
Jan 23 10:05:40 compute-0 polkitd[43358]: Loading rules from directory /usr/share/polkit-1/rules.d
Unit power-profiles-daemon.service could not be found.
Unit rpc-svcgssd.service could not be found.
Jan 23 10:05:40 compute-0 polkitd[43358]: Finished loading, compiling and executing 3 rules
Jan 23 10:07:24 compute-0 polkitd[43358]: Registered Authentication Agent for unix-process:217739:396561 (system bus name :1.2756 [pkttyagent --process 217739 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Jan 23 10:07:24 compute-0 polkitd[43358]: Unregistered Authentication Agent for unix-process:217739:396561 (system bus name :1.2756, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Jan 23 10:07:24 compute-0 polkitd[43358]: Registered Authentication Agent for unix-process:217738:396560 (system bus name :1.2757 [pkttyagent --process 217738 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Jan 23 10:07:24 compute-0 polkitd[43358]: Unregistered Authentication Agent for unix-process:217738:396560 (system bus name :1.2757, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Jan 23 10:07:26 compute-0 polkitd[43358]: Registered Authentication Agent for unix-process:218208:396786 (system bus name :1.2760 [pkttyagent --process 218208 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Jan 23 10:07:26 compute-0 polkitd[43358]: Unregistered Authentication Agent for unix-process:218208:396786 (system bus name :1.2760, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)

○ raid-check.service - RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.service; static)
     Active: inactive (dead)
TriggeredBy: ○ raid-check.timer

○ rbdmap.service - Map RBD devices
     Loaded: loaded (/usr/lib/systemd/system/rbdmap.service; disabled; preset: disabled)
     Active: inactive (dead)

○ rc-local.service - /etc/rc.d/rc.local Compatibility
     Loaded: loaded (/usr/lib/systemd/system/rc-local.service; static)
     Active: inactive (dead)
       Docs: man:systemd-rc-local-generator(8)

○ rescue.service - Rescue Shell
     Loaded: loaded (/usr/lib/systemd/system/rescue.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

○ rpc-gssd.service - RPC security service for NFS client and server
     Loaded: loaded (/usr/lib/systemd/system/rpc-gssd.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Fri 2026-01-23 09:01:24 UTC; 1h 40min ago
       Docs: man:rpc.gssd(8)

Jan 23 09:01:24 np0005593293.novalocal systemd[1]: RPC security service for NFS client and server was skipped because of an unmet condition check (ConditionPathExists=/etc/krb5.keytab).

● rpc-statd-notify.service - Notify NFS peers of a restart
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd-notify.service; static)
     Active: active (exited) since Fri 2026-01-23 09:01:30 UTC; 1h 39min ago
       Docs: man:sm-notify(8)
             man:rpc.statd(8)
        CPU: 11ms

Jan 23 09:01:30 np0005593293.novalocal systemd[1]: Starting Notify NFS peers of a restart...
Jan 23 09:01:30 np0005593293.novalocal sm-notify[1002]: Version 2.5.4 starting
Jan 23 09:01:30 np0005593293.novalocal systemd[1]: Started Notify NFS peers of a restart.

○ rpc-statd.service - NFS status monitor for NFSv2/3 locking.
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.statd(8)

● rpcbind.service - RPC Bind
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.service; enabled; preset: enabled)
     Active: active (running) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
TriggeredBy: ● rpcbind.socket
       Docs: man:rpcbind(8)
   Main PID: 697 (rpcbind)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 2.3M (peak: 2.8M)
        CPU: 56ms
     CGroup: /system.slice/rpcbind.service
             └─697 /usr/bin/rpcbind -w -f

Jan 23 09:01:23 localhost systemd[1]: Starting RPC Bind...
Unit sntp.service could not be found.
Unit sshd-keygen.service could not be found.
Jan 23 09:01:23 localhost systemd[1]: Started RPC Bind.

● rsyslog.service - System Logging Service
     Loaded: loaded (/usr/lib/systemd/system/rsyslog.service; enabled; preset: enabled)
     Active: active (running) since Fri 2026-01-23 09:01:30 UTC; 1h 39min ago
       Docs: man:rsyslogd(8)
             https://www.rsyslog.com/doc/
   Main PID: 1003 (rsyslogd)
         IO: 0B read, 18.4M written
      Tasks: 3 (limit: 48560)
     Memory: 19.4M (peak: 19.8M)
        CPU: 12.330s
     CGroup: /system.slice/rsyslog.service
             └─1003 /usr/sbin/rsyslogd -n

Jan 23 10:10:50 compute-0 rsyslogd[1003]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 23 10:10:50 compute-0 rsyslogd[1003]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 23 10:10:58 compute-0 rsyslogd[1003]: imjournal from <np0005593293:nova_compute>: begin to drop messages due to rate-limiting
Jan 23 10:12:24 compute-0 rsyslogd[1003]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 23 10:12:24 compute-0 rsyslogd[1003]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 23 10:17:36 compute-0 rsyslogd[1003]: imjournal: 6688 messages lost due to rate-limiting (20000 allowed within 600 seconds)
Jan 23 10:25:36 compute-0 rsyslogd[1003]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 23 10:25:36 compute-0 rsyslogd[1003]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 23 10:30:15 compute-0 rsyslogd[1003]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 23 10:40:59 compute-0 rsyslogd[1003]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]

○ selinux-autorelabel-mark.service - Mark the need to relabel after reboot
     Loaded: loaded (/usr/lib/systemd/system/selinux-autorelabel-mark.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Fri 2026-01-23 09:01:23 UTC; 1h 40min ago

Jan 23 09:01:23 localhost systemd[1]: Mark the need to relabel after reboot was skipped because of an unmet condition check (ConditionSecurity=!selinux).

● serial-getty@ttyS0.service - Serial Getty on ttyS0
     Loaded: loaded (/usr/lib/systemd/system/serial-getty@.service; enabled-runtime; preset: disabled)
     Active: active (running) since Fri 2026-01-23 09:01:30 UTC; 1h 39min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1010 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 244.0K (peak: 500.0K)
        CPU: 9ms
     CGroup: /system.slice/system-serial\x2dgetty.slice/serial-getty@ttyS0.service
             └─1010 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

Jan 23 09:01:30 np0005593293.novalocal systemd[1]: Started Serial Getty on ttyS0.

○ sshd-keygen@ecdsa.service - OpenSSH ecdsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Fri 2026-01-23 10:05:45 UTC; 35min ago

Jan 23 09:01:23 localhost systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Jan 23 10:05:45 compute-0 systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@ed25519.service - OpenSSH ed25519 Server Key Generation
Unit syslog.service could not be found.
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Fri 2026-01-23 10:05:45 UTC; 35min ago

Jan 23 09:01:23 localhost systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Jan 23 10:05:45 compute-0 systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@rsa.service - OpenSSH rsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Fri 2026-01-23 10:05:45 UTC; 35min ago

Jan 23 09:01:23 localhost systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Jan 23 10:05:45 compute-0 systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

● sshd.service - OpenSSH server daemon
     Loaded: loaded (/usr/lib/systemd/system/sshd.service; enabled; preset: enabled)
     Active: active (running) since Fri 2026-01-23 10:05:45 UTC; 35min ago
       Docs: man:sshd(8)
             man:sshd_config(5)
   Main PID: 189686 (sshd)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 3.4M (peak: 6.6M)
        CPU: 293ms
     CGroup: /system.slice/sshd.service
             └─189686 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"

Jan 23 10:29:56 compute-0 sshd-session[272812]: Accepted publickey for zuul from 192.168.122.10 port 59636 ssh2: ECDSA SHA256:VirhpRcIg3eaQ2of1D68YV1JVeFZwgFg3WdbJHtted4
Jan 23 10:29:56 compute-0 sshd-session[272812]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Jan 23 10:31:44 compute-0 sshd-session[281082]: Accepted publickey for zuul from 192.168.122.10 port 33218 ssh2: ECDSA SHA256:VirhpRcIg3eaQ2of1D68YV1JVeFZwgFg3WdbJHtted4
Jan 23 10:31:44 compute-0 sshd-session[281082]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Jan 23 10:31:44 compute-0 sshd-session[281082]: pam_unix(sshd:session): session closed for user zuul
Jan 23 10:31:44 compute-0 sshd-session[281112]: Accepted publickey for zuul from 192.168.122.10 port 33234 ssh2: ECDSA SHA256:VirhpRcIg3eaQ2of1D68YV1JVeFZwgFg3WdbJHtted4
Jan 23 10:31:44 compute-0 sshd-session[281112]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Jan 23 10:31:44 compute-0 sshd-session[281112]: pam_unix(sshd:session): session closed for user zuul
Jan 23 10:40:43 compute-0 sshd-session[289862]: Accepted publickey for zuul from 192.168.122.10 port 52522 ssh2: ECDSA SHA256:VirhpRcIg3eaQ2of1D68YV1JVeFZwgFg3WdbJHtted4
Jan 23 10:40:43 compute-0 sshd-session[289862]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)

○ sssd-kcm.service - SSSD Kerberos Cache Manager
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.service; indirect; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ● sssd-kcm.socket
       Docs: man:sssd-kcm(5)

○ sssd.service - System Security Services Daemon
     Loaded: loaded (/usr/lib/systemd/system/sssd.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Fri 2026-01-23 09:01:23 UTC; 1h 40min ago

Jan 23 09:01:23 localhost systemd[1]: System Security Services Daemon was skipped because no trigger condition checks were met.

○ sysstat-collect.service - system activity accounting tool
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-collect.timer
       Docs: man:sa1(8)

○ sysstat-summary.service - Generate a daily summary of process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-summary.timer
       Docs: man:sa2(8)

○ sysstat.service - Resets System Activity Logs
     Loaded: loaded (/usr/lib/systemd/system/sysstat.service; enabled; preset: enabled)
     Active: inactive (dead)

○ systemd-ask-password-console.service - Dispatch Password Requests to Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-console.path
       Docs: man:systemd-ask-password-console.service(8)

○ systemd-ask-password-wall.service - Forward Password Requests to Wall
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-wall.path
       Docs: man:systemd-ask-password-wall.service(8)

○ systemd-binfmt.service - Set Up Additional Binary Formats
     Loaded: loaded (/usr/lib/systemd/system/systemd-binfmt.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:systemd-binfmt.service(8)
             man:binfmt.d(5)
             https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Jan 23 09:01:23 localhost systemd[1]: Set Up Additional Binary Formats was skipped because no trigger condition checks were met.

○ systemd-boot-random-seed.service - Update Boot Loader Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-random-seed.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:systemd-boot-random-seed.service(8)
             man:random(4)

Jan 23 09:01:23 localhost systemd[1]: Update Boot Loader Random Seed was skipped because no trigger condition checks were met.

● systemd-boot-update.service - Automatic Boot Loader Update
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-update.service; enabled; preset: enabled)
     Active: active (exited) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:bootctl(1)
   Main PID: 693 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Jan 23 09:01:23 localhost systemd[1]: Starting Automatic Boot Loader Update...
Jan 23 09:01:23 localhost bootctl[693]: Couldn't find EFI system partition, skipping.
Jan 23 09:01:23 localhost systemd[1]: Finished Automatic Boot Loader Update.

○ systemd-firstboot.service - First Boot Wizard
     Loaded: loaded (/usr/lib/systemd/system/systemd-firstboot.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:systemd-firstboot(1)

Jan 23 09:01:23 localhost systemd[1]: First Boot Wizard was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ systemd-fsck-root.service - File System Check on Root Device
     Loaded: loaded (/usr/lib/systemd/system/systemd-fsck-root.service; static)
     Active: inactive (dead) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
   Duration: 1.544s
       Docs: man:systemd-fsck-root.service(8)
   Main PID: 551 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Jan 23 09:01:21 localhost systemd[1]: Starting File System Check on /dev/disk/by-uuid/22ac9141-3960-4912-b20e-19fc8a328d40...
Jan 23 09:01:21 localhost systemd-fsck[553]: /usr/sbin/fsck.xfs: XFS file system.
Jan 23 09:01:21 localhost systemd[1]: Finished File System Check on /dev/disk/by-uuid/22ac9141-3960-4912-b20e-19fc8a328d40.

● systemd-hostnamed.service - Hostname Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-hostnamed.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-hostnamed.service.d
             └─disable-privatedevices.conf
     Active: active (running) since Fri 2026-01-23 10:41:04 UTC; 21s ago
       Docs: man:systemd-hostnamed.service(8)
             man:hostname(5)
             man:machine-info(5)
             man:org.freedesktop.resolve1(5)
   Main PID: 292378 (systemd-hostnam)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 2.7M (peak: 3.8M)
        CPU: 141ms
     CGroup: /system.slice/systemd-hostnamed.service
             └─292378 /usr/lib/systemd/systemd-hostnamed

Jan 23 10:41:04 compute-0 systemd[1]: Starting Hostname Service...
Jan 23 10:41:04 compute-0 systemd[1]: Started Hostname Service.

● systemd-hwdb-update.service - Rebuild Hardware Database
     Loaded: loaded (/usr/lib/systemd/system/systemd-hwdb-update.service; static)
     Active: active (exited) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:hwdb(7)
             man:systemd-hwdb(8)
   Main PID: 684 (code=exited, status=0/SUCCESS)
        CPU: 512ms

Jan 23 09:01:23 localhost systemd[1]: Starting Rebuild Hardware Database...
Jan 23 09:01:23 localhost systemd[1]: Finished Rebuild Hardware Database.

○ systemd-initctl.service - initctl Compatibility Daemon
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-initctl.socket
       Docs: man:systemd-initctl.service(8)

● systemd-journal-catalog-update.service - Rebuild Journal Catalog
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-catalog-update.service; static)
     Active: active (exited) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 698 (code=exited, status=0/SUCCESS)
        CPU: 35ms

Jan 23 09:01:23 localhost systemd[1]: Starting Rebuild Journal Catalog...
Jan 23 09:01:23 localhost systemd[1]: Finished Rebuild Journal Catalog.

● systemd-journal-flush.service - Flush Journal to Persistent Storage
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-flush.service; static)
     Active: active (exited) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 685 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Jan 23 09:01:23 localhost systemd[1]: Starting Flush Journal to Persistent Storage...
Jan 23 09:01:23 localhost systemd[1]: Finished Flush Journal to Persistent Storage.

● systemd-journald.service - Journal Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.service; static)
     Active: active (running) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
TriggeredBy: ● systemd-journald-dev-log.socket
             ● systemd-journald.socket
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 675 (systemd-journal)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 79.5M (peak: 87.3M)
        CPU: 16.352s
     CGroup: /system.slice/systemd-journald.service
             └─675 /usr/lib/systemd/systemd-journald

Jan 23 09:01:23 localhost systemd-journald[675]: Journal started
Jan 23 09:01:23 localhost systemd-journald[675]: Runtime Journal (/run/log/journal/85ac68c10a6e7ae08ceb898dbdca0cb5) is 8.0M, max 153.6M, 145.6M free.
Jan 23 09:01:22 localhost systemd[1]: systemd-journald.service: Deactivated successfully.
Jan 23 09:01:23 localhost systemd-journald[675]: Runtime Journal (/run/log/journal/85ac68c10a6e7ae08ceb898dbdca0cb5) is 8.0M, max 153.6M, 145.6M free.
Jan 23 09:01:23 localhost systemd-journald[675]: Received client request to flush runtime journal.

● systemd-logind.service - User Login Management
     Loaded: loaded (/usr/lib/systemd/system/systemd-logind.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-logind.service.d
             └─10-grub2-logind-service.conf
     Active: active (running) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:sd-login(3)
             man:systemd-logind.service(8)
             man:logind.conf(5)
             man:org.freedesktop.login1(5)
   Main PID: 784 (systemd-logind)
     Status: "Processing requests..."
         IO: 4.0K read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 6.2M (peak: 6.8M)
        CPU: 3.755s
     CGroup: /system.slice/systemd-logind.service
             └─784 /usr/lib/systemd/systemd-logind

Jan 23 10:29:56 compute-0 systemd-logind[784]: New session 56 of user zuul.
Jan 23 10:31:44 compute-0 systemd-logind[784]: Session 56 logged out. Waiting for processes to exit.
Jan 23 10:31:44 compute-0 systemd-logind[784]: Removed session 56.
Jan 23 10:31:44 compute-0 systemd-logind[784]: New session 57 of user zuul.
Jan 23 10:31:44 compute-0 systemd-logind[784]: Session 57 logged out. Waiting for processes to exit.
Jan 23 10:31:44 compute-0 systemd-logind[784]: Removed session 57.
Jan 23 10:31:44 compute-0 systemd-logind[784]: New session 58 of user zuul.
Jan 23 10:31:44 compute-0 systemd-logind[784]: Session 58 logged out. Waiting for processes to exit.
Jan 23 10:31:44 compute-0 systemd-logind[784]: Removed session 58.
Jan 23 10:40:43 compute-0 systemd-logind[784]: New session 59 of user zuul.

○ systemd-machine-id-commit.service - Commit a transient machine-id on disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-machine-id-commit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:systemd-machine-id-commit.service(8)

Jan 23 09:01:23 localhost systemd[1]: Commit a transient machine-id on disk was skipped because of an unmet condition check (ConditionPathIsMountPoint=/etc/machine-id).

● systemd-machined.service - Virtual Machine and Container Registration Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-machined.service; static)
     Active: active (running) since Fri 2026-01-23 10:07:17 UTC; 34min ago
       Docs: man:systemd-machined.service(8)
             man:org.freedesktop.machine1(5)
   Main PID: 216411 (systemd-machine)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 1.5M (peak: 2.2M)
        CPU: 1.430s
     CGroup: /system.slice/systemd-machined.service
             └─216411 /usr/lib/systemd/systemd-machined

Jan 23 10:07:17 compute-0 systemd[1]: Starting Virtual Machine and Container Registration Service...
Jan 23 10:07:17 compute-0 systemd[1]: Started Virtual Machine and Container Registration Service.
Jan 23 10:16:28 compute-0 systemd-machined[216411]: New machine qemu-1-instance-00000001.
Jan 23 10:17:59 compute-0 systemd-machined[216411]: Machine qemu-1-instance-00000001 terminated.
Jan 23 10:20:32 compute-0 systemd-machined[216411]: New machine qemu-2-instance-00000005.
Jan 23 10:20:55 compute-0 systemd-machined[216411]: Machine qemu-2-instance-00000005 terminated.
Jan 23 10:23:46 compute-0 systemd-machined[216411]: New machine qemu-3-instance-00000008.
Jan 23 10:23:52 compute-0 systemd-machined[216411]: Machine qemu-3-instance-00000008 terminated.
Jan 23 10:28:05 compute-0 systemd-machined[216411]: New machine qemu-4-instance-0000000d.
Jan 23 10:28:40 compute-0 systemd-machined[216411]: Machine qemu-4-instance-0000000d terminated.

● systemd-modules-load.service - Load Kernel Modules
     Loaded: loaded (/usr/lib/systemd/system/systemd-modules-load.service; static)
     Active: active (exited) since Fri 2026-01-23 10:08:52 UTC; 32min ago
       Docs: man:systemd-modules-load.service(8)
             man:modules-load.d(5)
   Main PID: 231429 (code=exited, status=0/SUCCESS)
        CPU: 11ms

Jan 23 10:08:52 compute-0 systemd[1]: Starting Load Kernel Modules...
Jan 23 10:08:52 compute-0 systemd[1]: Finished Load Kernel Modules.

● systemd-network-generator.service - Generate network units from Kernel command line
     Loaded: loaded (/usr/lib/systemd/system/systemd-network-generator.service; enabled; preset: enabled)
Unit systemd-networkd-wait-online.service could not be found.
     Active: active (exited) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:systemd-network-generator.service(8)
   Main PID: 676 (code=exited, status=0/SUCCESS)
        CPU: 11ms

Jan 23 09:01:23 localhost systemd[1]: Finished Generate network units from Kernel command line.

○ systemd-pcrmachine.service - TPM2 PCR Machine ID Measurement
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrmachine.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
       Docs: man:systemd-pcrmachine.service(8)

○ systemd-pcrphase-initrd.service - TPM2 PCR Barrier (initrd)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-initrd.service; static)
     Active: inactive (dead)
       Docs: man:systemd-pcrphase-initrd.service(8)

○ systemd-pcrphase-sysinit.service - TPM2 PCR Barrier (Initialization)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-sysinit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:systemd-pcrphase-sysinit.service(8)

Jan 23 09:01:23 localhost systemd[1]: TPM2 PCR Barrier (Initialization) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pcrphase.service - TPM2 PCR Barrier (User)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Fri 2026-01-23 09:01:24 UTC; 1h 40min ago
       Docs: man:systemd-pcrphase.service(8)

Jan 23 09:01:24 np0005593293.novalocal systemd[1]: TPM2 PCR Barrier (User) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pstore.service - Platform Persistent Storage Archival
     Loaded: loaded (/usr/lib/systemd/system/systemd-pstore.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:systemd-pstore(8)

Jan 23 09:01:23 localhost systemd[1]: Platform Persistent Storage Archival was skipped because of an unmet condition check (ConditionDirectoryNotEmpty=/sys/fs/pstore).

● systemd-random-seed.service - Load/Save OS Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-random-seed.service; static)
     Active: active (exited) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:systemd-random-seed.service(8)
             man:random(4)
   Main PID: 686 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Jan 23 09:01:23 localhost systemd[1]: Starting Load/Save OS Random Seed...
Jan 23 09:01:23 localhost systemd[1]: Finished Load/Save OS Random Seed.

● systemd-remount-fs.service - Remount Root and Kernel File Systems
     Loaded: loaded (/usr/lib/systemd/system/systemd-remount-fs.service; enabled-runtime; preset: disabled)
     Active: active (exited) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:systemd-remount-fs.service(8)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
   Main PID: 677 (code=exited, status=0/SUCCESS)
        CPU: 15ms

Jan 23 09:01:23 localhost systemd[1]: Finished Remount Root and Kernel File Systems.

○ systemd-repart.service - Repartition Root Disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-repart.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
       Docs: man:systemd-repart.service(8)

○ systemd-rfkill.service - Load/Save RF Kill Switch Status
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-rfkill.socket
       Docs: man:systemd-rfkill.service(8)

● systemd-sysctl.service - Apply Kernel Variables
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysctl.service; static)
Unit systemd-timesyncd.service could not be found.
Unit systemd-tmpfiles.service could not be found.
     Active: active (exited) since Fri 2026-01-23 09:41:39 UTC; 59min ago
       Docs: man:systemd-sysctl.service(8)
             man:sysctl.d(5)
   Main PID: 44844 (code=exited, status=0/SUCCESS)
        CPU: 21ms

Jan 23 09:41:39 compute-0 systemd[1]: Starting Apply Kernel Variables...
Jan 23 09:41:39 compute-0 systemd[1]: Finished Apply Kernel Variables.

○ systemd-sysext.service - Merge System Extension Images into /usr/ and /opt/
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysext.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd-sysext.service(8)

● systemd-sysusers.service - Create System Users
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysusers.service; static)
     Active: active (exited) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:sysusers.d(5)
             man:systemd-sysusers.service(8)
   Main PID: 687 (code=exited, status=0/SUCCESS)
        CPU: 23ms

Jan 23 09:01:23 localhost systemd[1]: Starting Create System Users...
Jan 23 09:01:23 localhost systemd[1]: Finished Create System Users.

○ systemd-tmpfiles-clean.service - Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.service; static)
     Active: inactive (dead) since Fri 2026-01-23 09:16:18 UTC; 1h 25min ago
TriggeredBy: ● systemd-tmpfiles-clean.timer
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 12895 (code=exited, status=0/SUCCESS)
        CPU: 57ms

Jan 23 09:16:18 np0005593293.novalocal systemd[1]: Starting Cleanup of Temporary Directories...
Jan 23 09:16:18 np0005593293.novalocal systemd[1]: systemd-tmpfiles-clean.service: Deactivated successfully.
Jan 23 09:16:18 np0005593293.novalocal systemd[1]: Finished Cleanup of Temporary Directories.

● systemd-tmpfiles-setup-dev.service - Create Static Device Nodes in /dev
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup-dev.service; static)
     Active: active (exited) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 689 (code=exited, status=0/SUCCESS)
        CPU: 35ms

Jan 23 09:01:23 localhost systemd[1]: Starting Create Static Device Nodes in /dev...
Jan 23 09:01:23 localhost systemd[1]: Finished Create Static Device Nodes in /dev.

● systemd-tmpfiles-setup.service - Create Volatile Files and Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup.service; static)
     Active: active (exited) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 694 (code=exited, status=0/SUCCESS)
        CPU: 99ms

Jan 23 09:01:23 localhost systemd[1]: Starting Create Volatile Files and Directories...
Jan 23 09:01:23 localhost systemd[1]: Finished Create Volatile Files and Directories.

● systemd-udev-settle.service - Wait for udev To Complete Device Initialization
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-settle.service; static)
     Active: active (exited) since Fri 2026-01-23 10:08:47 UTC; 32min ago
       Docs: man:systemd-udev-settle.service(8)
   Main PID: 230492 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Jan 23 10:08:47 compute-0 systemd[1]: Starting Wait for udev To Complete Device Initialization...
Jan 23 10:08:47 compute-0 udevadm[230492]: systemd-udev-settle.service is deprecated. Please fix multipathd.service not to pull it in.
Jan 23 10:08:47 compute-0 systemd[1]: Finished Wait for udev To Complete Device Initialization.

● systemd-udev-trigger.service - Coldplug All udev Devices
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-trigger.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-udev-trigger.service.d
             └─systemd-udev-trigger-no-reload.conf
     Active: active (exited) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:udev(7)
             man:systemd-udevd.service(8)
   Main PID: 681 (code=exited, status=0/SUCCESS)
        CPU: 96ms

Jan 23 09:01:23 localhost systemd[1]: Finished Coldplug All udev Devices.

● systemd-udevd.service - Rule-based Manager for Device Events and Files
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd.service; static)
     Active: active (running) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
TriggeredBy: ● systemd-udevd-kernel.socket
             ● systemd-udevd-control.socket
       Docs: man:systemd-udevd.service(8)
             man:udev(7)
   Main PID: 727 (systemd-udevd)
     Status: "Processing with 32 children at max"
         IO: 67.2M read, 25.2M written
      Tasks: 1
     Memory: 30.2M (peak: 91.5M)
        CPU: 8.716s
     CGroup: /system.slice/systemd-udevd.service
             └─udev
               └─727 /usr/lib/systemd/systemd-udevd

Jan 23 10:37:52 compute-0 lvm[287638]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Jan 23 10:37:52 compute-0 lvm[287638]: VG ceph_vg0 finished
Jan 23 10:39:03 compute-0 lvm[288621]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Jan 23 10:39:03 compute-0 lvm[288621]: VG ceph_vg0 finished
Jan 23 10:40:10 compute-0 lvm[289649]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Jan 23 10:40:10 compute-0 lvm[289649]: VG ceph_vg0 finished
Jan 23 10:40:52 compute-0 lvm[290564]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Jan 23 10:40:52 compute-0 lvm[290564]: VG ceph_vg0 finished
Jan 23 10:41:18 compute-0 lvm[295892]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Jan 23 10:41:18 compute-0 lvm[295892]: VG ceph_vg0 finished

● systemd-update-done.service - Update is Completed
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-done.service; static)
     Active: active (exited) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:systemd-update-done.service(8)
   Main PID: 728 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Jan 23 09:01:23 localhost systemd[1]: Starting Update is Completed...
Jan 23 09:01:23 localhost systemd[1]: Finished Update is Completed.

○ systemd-update-utmp-runlevel.service - Record Runlevel Change in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp-runlevel.service; static)
     Active: inactive (dead) since Fri 2026-01-23 09:01:30 UTC; 1h 39min ago
       Docs: man:systemd-update-utmp-runlevel.service(8)
             man:utmp(5)
   Main PID: 1020 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Jan 23 09:01:30 np0005593293.novalocal systemd[1]: Starting Record Runlevel Change in UTMP...
Jan 23 09:01:30 np0005593293.novalocal systemd[1]: systemd-update-utmp-runlevel.service: Deactivated successfully.
Jan 23 09:01:30 np0005593293.novalocal systemd[1]: Finished Record Runlevel Change in UTMP.

● systemd-update-utmp.service - Record System Boot/Shutdown in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp.service; static)
     Active: active (exited) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:systemd-update-utmp.service(8)
             man:utmp(5)
   Main PID: 726 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Jan 23 09:01:23 localhost systemd[1]: Starting Record System Boot/Shutdown in UTMP...
Jan 23 09:01:23 localhost systemd[1]: Finished Record System Boot/Shutdown in UTMP.

● systemd-user-sessions.service - Permit User Sessions
     Loaded: loaded (/usr/lib/systemd/system/systemd-user-sessions.service; static)
     Active: active (exited) since Fri 2026-01-23 09:01:30 UTC; 1h 39min ago
       Docs: man:systemd-user-sessions.service(8)
   Main PID: 1005 (code=exited, status=0/SUCCESS)
        CPU: 11ms

Jan 23 09:01:30 np0005593293.novalocal systemd[1]: Starting Permit User Sessions...
Jan 23 09:01:30 np0005593293.novalocal systemd[1]: Finished Permit User Sessions.

○ systemd-vconsole-setup.service - Setup Virtual Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-vconsole-setup.service; static)
     Active: inactive (dead) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
   Duration: 1.826s
       Docs: man:systemd-vconsole-setup.service(8)
             man:vconsole.conf(5)
   Main PID: 311 (code=exited, status=0/SUCCESS)
        CPU: 231ms

Unit tlp.service could not be found.
Jan 23 09:01:20 localhost systemd[1]: Finished Setup Virtual Console.
Jan 23 09:01:22 localhost systemd[1]: systemd-vconsole-setup.service: Deactivated successfully.
Jan 23 09:01:22 localhost systemd[1]: Stopped Setup Virtual Console.
Notice: journal has been rotated since unit was started, output may be incomplete.

● tuned.service - Dynamic System Tuning Daemon
     Loaded: loaded (/usr/lib/systemd/system/tuned.service; enabled; preset: enabled)
     Active: active (running) since Fri 2026-01-23 09:55:50 UTC; 45min ago
       Docs: man:tuned(8)
             man:tuned.conf(5)
             man:tuned-adm(8)
   Main PID: 109791 (tuned)
         IO: 0B read, 0B written
      Tasks: 4 (limit: 48560)
     Memory: 13.8M (peak: 16.1M)
        CPU: 1.436s
     CGroup: /system.slice/tuned.service
             └─109791 /usr/bin/python3 -Es /usr/sbin/tuned -l -P

Jan 23 09:55:50 compute-0 systemd[1]: Starting Dynamic System Tuning Daemon...
Jan 23 09:55:50 compute-0 systemd[1]: Started Dynamic System Tuning Daemon.

○ unbound-anchor.service - update of the root trust anchor for DNSSEC validation in unbound
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.service; static)
     Active: inactive (dead)
TriggeredBy: ● unbound-anchor.timer
       Docs: man:unbound-anchor(8)

● user-runtime-dir@1000.service - User Runtime Directory /run/user/1000
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Fri 2026-01-23 09:01:44 UTC; 1h 39min ago
       Docs: man:user@.service(5)
   Main PID: 4303 (code=exited, status=0/SUCCESS)
        CPU: 31ms

Jan 23 09:01:44 np0005593293.novalocal systemd[1]: Starting User Runtime Directory /run/user/1000...
Jan 23 09:01:44 np0005593293.novalocal systemd[1]: Finished User Runtime Directory /run/user/1000.

● user-runtime-dir@42477.service - User Runtime Directory /run/user/42477
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Fri 2026-01-23 09:51:33 UTC; 49min ago
       Docs: man:user@.service(5)
   Main PID: 90023 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Jan 23 09:51:33 compute-0 systemd[1]: Starting User Runtime Directory /run/user/42477...
Jan 23 09:51:33 compute-0 systemd[1]: Finished User Runtime Directory /run/user/42477.

● user@1000.service - User Manager for UID 1000
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Fri 2026-01-23 09:01:44 UTC; 1h 39min ago
       Docs: man:user@.service(5)
   Main PID: 4304 (systemd)
     Status: "Ready."
         IO: 676.0K read, 4.0K written
      Tasks: 5
     Memory: 7.6M (peak: 11.2M)
        CPU: 3.936s
     CGroup: /user.slice/user-1000.slice/user@1000.service
             ├─app.slice
             │ └─dbus-broker.service
             │   ├─11765 /usr/bin/dbus-broker-launch --scope user
             │   └─11778 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             ├─init.scope
             │ ├─4304 /usr/lib/systemd/systemd --user
             │ └─4306 "(sd-pam)"
             └─user.slice
               └─podman-pause-3184ab7f.scope
                 └─11628 catatonit -P

Jan 23 09:16:16 np0005593293.novalocal dbus-broker-launch[11765]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +31: Eavesdropping is deprecated and ignored
Jan 23 09:16:16 np0005593293.novalocal dbus-broker-launch[11765]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +33: Eavesdropping is deprecated and ignored
Jan 23 09:16:16 np0005593293.novalocal systemd[4304]: Started D-Bus User Message Bus.
Jan 23 09:16:16 np0005593293.novalocal dbus-broker-lau[11765]: Ready
Jan 23 09:16:16 np0005593293.novalocal systemd[4304]: selinux: avc:  op=load_policy lsm=selinux seqno=4 res=1
Jan 23 09:16:16 np0005593293.novalocal systemd[4304]: Created slice Slice /user.
Jan 23 09:16:16 np0005593293.novalocal systemd[4304]: podman-11607.scope: unit configures an IP firewall, but not running as root.
Jan 23 09:16:16 np0005593293.novalocal systemd[4304]: (This warning is only shown for the first unit using IP firewalling.)
Jan 23 09:16:16 np0005593293.novalocal systemd[4304]: Started podman-11607.scope.
Jan 23 09:16:17 np0005593293.novalocal systemd[4304]: Started podman-pause-3184ab7f.scope.

● user@42477.service - User Manager for UID 42477
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Fri 2026-01-23 09:51:33 UTC; 49min ago
       Docs: man:user@.service(5)
   Main PID: 90024 (systemd)
     Status: "Ready."
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 7.3M (peak: 11.7M)
        CPU: 2.505s
     CGroup: /user.slice/user-42477.slice/user@42477.service
             └─init.scope
               ├─90024 /usr/lib/systemd/systemd --user
               └─90026 "(sd-pam)"

Jan 23 09:51:33 compute-0 systemd[90024]: Reached target Sockets.
Jan 23 09:51:33 compute-0 systemd[90024]: Reached target Basic System.
Jan 23 09:51:33 compute-0 systemd[90024]: Reached target Main User Target.
Jan 23 09:51:33 compute-0 systemd[90024]: Startup finished in 120ms.
Jan 23 09:51:33 compute-0 systemd[1]: Started User Manager for UID 42477.
Jan 23 09:53:34 compute-0 systemd[90024]: Starting Mark boot as successful...
Jan 23 09:53:34 compute-0 systemd[90024]: Finished Mark boot as successful.
Jan 23 09:57:23 compute-0 systemd[90024]: Created slice User Background Tasks Slice.
Jan 23 09:57:23 compute-0 systemd[90024]: Starting Cleanup of User's Temporary Files and Directories...
Jan 23 09:57:23 compute-0 systemd[90024]: Finished Cleanup of User's Temporary Files and Directories.

○ virtinterfaced.service - libvirt interface daemon
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtinterfaced-ro.socket
             ○ virtinterfaced.socket
             ○ virtinterfaced-admin.socket
       Docs: man:virtinterfaced(8)
             https://libvirt.org/

○ virtlockd.service - libvirt locking daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtlockd-admin.socket
             ● virtlockd.socket
       Docs: man:virtlockd(8)
             https://libvirt.org/

● virtlogd.service - libvirt logging daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.service; enabled; preset: disabled)
     Active: active (running) since Fri 2026-01-23 10:07:14 UTC; 34min ago
TriggeredBy: ● virtlogd-admin.socket
             ● virtlogd.socket
       Docs: man:virtlogd(8)
             https://libvirt.org/
   Main PID: 215777 (virtlogd)
         IO: 644.0K read, 360.0K written
      Tasks: 1 (limit: 48560)
     Memory: 3.3M (peak: 3.6M)
        CPU: 5.302s
     CGroup: /system.slice/virtlogd.service
             └─215777 /usr/sbin/virtlogd

Jan 23 10:07:13 compute-0 systemd[1]: Starting libvirt logging daemon...
Jan 23 10:07:14 compute-0 systemd[1]: Started libvirt logging daemon.

○ virtnetworkd.service - libvirt network daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnetworkd-admin.socket
             ○ virtnetworkd-ro.socket
             ○ virtnetworkd.socket
       Docs: man:virtnetworkd(8)
             https://libvirt.org/

● virtnodedevd.service - libvirt nodedev daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.service; enabled; preset: disabled)
     Active: active (running) since Fri 2026-01-23 10:10:50 UTC; 30min ago
TriggeredBy: ● virtnodedevd-ro.socket
             ● virtnodedevd.socket
             ● virtnodedevd-admin.socket
       Docs: man:virtnodedevd(8)
             https://libvirt.org/
   Main PID: 248921 (virtnodedevd)
         IO: 4.3M read, 0B written
      Tasks: 20 (limit: 48560)
     Memory: 10.0M (peak: 11.3M)
        CPU: 2.117s
     CGroup: /system.slice/virtnodedevd.service
             └─248921 /usr/sbin/virtnodedevd --timeout 120

Jan 23 10:10:50 compute-0 systemd[1]: Starting libvirt nodedev daemon...
Jan 23 10:10:50 compute-0 systemd[1]: Started libvirt nodedev daemon.

○ virtnwfilterd.service - libvirt nwfilter daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnwfilterd-ro.socket
             ○ virtnwfilterd.socket
             ○ virtnwfilterd-admin.socket
       Docs: man:virtnwfilterd(8)
             https://libvirt.org/

○ virtproxyd.service - libvirt proxy daemon
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.service; enabled; preset: disabled)
     Active: inactive (dead) since Fri 2026-01-23 10:09:16 UTC; 32min ago
   Duration: 2min 7ms
TriggeredBy: ● virtproxyd-ro.socket
             ● virtproxyd-admin.socket
             ● virtproxyd-tls.socket
             ● virtproxyd.socket
       Docs: man:virtproxyd(8)
             https://libvirt.org/
   Main PID: 216199 (code=exited, status=0/SUCCESS)
        CPU: 59ms

Jan 23 10:07:16 compute-0 systemd[1]: Starting libvirt proxy daemon...
Jan 23 10:07:16 compute-0 systemd[1]: Started libvirt proxy daemon.
Jan 23 10:09:16 compute-0 systemd[1]: virtproxyd.service: Deactivated successfully.

● virtqemud.service - libvirt QEMU daemon
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.service; enabled; preset: enabled)
     Active: active (running) since Fri 2026-01-23 10:10:48 UTC; 30min ago
TriggeredBy: ● virtqemud-ro.socket
             ● virtqemud-admin.socket
             ● virtqemud.socket
       Docs: man:virtqemud(8)
             https://libvirt.org/
   Main PID: 248554 (virtqemud)
         IO: 45.8M read, 273.0K written
      Tasks: 19 (limit: 32768)
     Memory: 63.2M (peak: 86.9M)
        CPU: 3.457s
     CGroup: /system.slice/virtqemud.service
             └─248554 /usr/sbin/virtqemud --timeout 120

Jan 23 10:10:52 compute-0 virtqemud[248554]: libvirt version: 11.10.0, package: 2.el9 (builder@centos.org, 2025-12-18-15:09:54, )
Jan 23 10:10:52 compute-0 virtqemud[248554]: hostname: compute-0
Jan 23 10:10:52 compute-0 virtqemud[248554]: End of file while reading data: Input/output error
Jan 23 10:30:07 compute-0 virtqemud[248554]: Failed to connect socket to '/var/run/libvirt/virtnetworkd-sock-ro': No such file or directory
Jan 23 10:30:07 compute-0 virtqemud[248554]: Failed to connect socket to '/var/run/libvirt/virtnwfilterd-sock-ro': No such file or directory
Jan 23 10:30:07 compute-0 virtqemud[248554]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory
Jan 23 10:30:44 compute-0 virtqemud[248554]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory
Jan 23 10:40:51 compute-0 virtqemud[248554]: Failed to connect socket to '/var/run/libvirt/virtnetworkd-sock-ro': No such file or directory
Jan 23 10:40:51 compute-0 virtqemud[248554]: Failed to connect socket to '/var/run/libvirt/virtnwfilterd-sock-ro': No such file or directory
Jan 23 10:40:51 compute-0 virtqemud[248554]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory

○ virtsecretd.service - libvirt secret daemon
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.service; enabled; preset: disabled)
     Active: inactive (dead) since Fri 2026-01-23 10:30:05 UTC; 11min ago
   Duration: 2min 17ms
TriggeredBy: ● virtsecretd-admin.socket
             ● virtsecretd.socket
             ● virtsecretd-ro.socket
       Docs: man:virtsecretd(8)
             https://libvirt.org/
    Process: 270568 ExecStart=/usr/sbin/virtsecretd $VIRTSECRETD_ARGS (code=exited, status=0/SUCCESS)
   Main PID: 270568 (code=exited, status=0/SUCCESS)
        CPU: 64ms

Unit ypbind.service could not be found.
Unit yppasswdd.service could not be found.
Unit ypserv.service could not be found.
Unit ypxfrd.service could not be found.
Jan 23 10:28:04 compute-0 systemd[1]: Starting libvirt secret daemon...
Jan 23 10:28:05 compute-0 systemd[1]: Started libvirt secret daemon.
Jan 23 10:30:05 compute-0 systemd[1]: virtsecretd.service: Deactivated successfully.

○ virtstoraged.service - libvirt storage daemon
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtstoraged.socket
             ○ virtstoraged-ro.socket
             ○ virtstoraged-admin.socket
       Docs: man:virtstoraged(8)
             https://libvirt.org/

● -.slice - Root Slice
     Loaded: loaded
     Active: active since Fri 2026-01-23 09:01:19 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:19 UTC; 1h 40min ago
       Docs: man:systemd.special(7)
      Tasks: 1365
     Memory: 3.2G
        CPU: 47min 2.530s
     CGroup: /
             ├─296661 turbostat --debug sleep 10
             ├─296664 sleep 10
             ├─init.scope
             │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
             ├─machine.slice
             │ ├─libpod-7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d.scope
             │ │ └─container
             │ │   ├─161918 dumb-init --single-child -- kolla_start
             │ │   ├─161921 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │ │   ├─162303 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │ │   ├─162436 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpm91q9wva/privsep.sock
             │ │   ├─255218 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmp5z6r0kvj/privsep.sock
             │ │   └─255276 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpv8hp88b7/privsep.sock
             │ ├─libpod-955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f.scope
             │ │ └─container
             │ │   ├─249231 dumb-init --single-child -- kolla_start
             │ │   ├─249233 /usr/bin/python3 /usr/bin/nova-compute
             │ │   ├─255097 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmp_1x_jasi/privsep.sock
             │ │   └─255486 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmpsh92gw98/privsep.sock
             │ └─libpod-ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d.scope
             │   └─container
             │     ├─151636 dumb-init --single-child -- kolla_start
             │     └─151639 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
             ├─system.slice
             │ ├─NetworkManager.service
             │ │ └─48866 /usr/sbin/NetworkManager --no-daemon
             │ ├─auditd.service
             │ │ ├─699 /sbin/auditd
             │ │ └─701 /usr/sbin/sedispatch
             │ ├─chronyd.service
             │ │ └─58433 /usr/sbin/chronyd -F 2
             │ ├─crond.service
             │ │ ├─  1008 /usr/sbin/crond -n
             │ │ └─147517 /usr/sbin/anacron -s
             │ ├─dbus-broker.service
             │ │ ├─765 /usr/bin/dbus-broker-launch --scope system --audit
             │ │ └─769 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             │ ├─edpm_nova_compute.service
             │ │ └─249229 /usr/bin/conmon --api-version 1 -c 955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f -u 955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f/userdata -p /run/containers/storage/overlay-containers/955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f
             │ ├─edpm_ovn_controller.service
             │ │ └─151634 /usr/bin/conmon --api-version 1 -c ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d -u ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d/userdata -p /run/containers/storage/overlay-containers/ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d
             │ ├─edpm_ovn_metadata_agent.service
             │ │ └─161916 /usr/bin/conmon --api-version 1 -c 7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d -u 7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d/userdata -p /run/containers/storage/overlay-containers/7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d
             │ ├─gssproxy.service
             │ │ └─868 /usr/sbin/gssproxy -D
             │ ├─irqbalance.service
             │ │ └─779 /usr/sbin/irqbalance
             │ ├─iscsid.service
             │ │ └─233083 /usr/sbin/iscsid -f
             │ ├─multipathd.service
             │ │ └─233315 /sbin/multipathd -d -s
             │ ├─ovs-vswitchd.service
             │ │ └─47171 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             │ ├─ovsdb-server.service
             │ │ └─47090 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             │ ├─polkit.service
             │ │ └─43358 /usr/lib/polkit-1/polkitd --no-debug
             │ ├─rpcbind.service
             │ │ └─697 /usr/bin/rpcbind -w -f
             │ ├─rsyslog.service
             │ │ └─1003 /usr/sbin/rsyslogd -n
             │ ├─sshd.service
             │ │ └─189686 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             │ ├─system-ceph\x2df3005f84\x2d239a\x2d55b6\x2da948\x2d8f1fb592b920.slice
             │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@alertmanager.compute-0.service
             │ │ │ ├─libpod-payload-a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982
             │ │ │ │ ├─104187 /run/podman-init -- /bin/alertmanager --cluster.listen-address=:9094 --web.listen-address=192.168.122.100:9093 --config.file=/etc/alertmanager/alertmanager.yml
             │ │ │ │ └─104189 /bin/alertmanager --cluster.listen-address=:9094 --web.listen-address=192.168.122.100:9093 --config.file=/etc/alertmanager/alertmanager.yml
             │ │ │ └─runtime
             │ │ │   └─104185 /usr/bin/conmon --api-version 1 -c a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982 -u a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982/userdata -p /run/containers/storage/overlay-containers/a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-alertmanager-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@alertmanager.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982
             │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@crash.compute-0.service
             │ │ │ ├─libpod-payload-ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21
             │ │ │ │ ├─79596 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ │ └─79598 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ └─runtime
             │ │ │   └─79594 /usr/bin/conmon --api-version 1 -c ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21 -u ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21/userdata -p /run/containers/storage/overlay-containers/ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21
             │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@grafana.compute-0.service
             │ │ │ ├─libpod-payload-91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28
             │ │ │ │ ├─104505 /run/podman-init -- /run.sh
             │ │ │ │ └─104507 grafana server --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini --packaging=docker cfg:default.log.mode=console cfg:default.paths.data=/var/lib/grafana cfg:default.paths.logs=/var/log/grafana cfg:default.paths.plugins=/var/lib/grafana/plugins cfg:default.paths.provisioning=/etc/grafana/provisioning
             │ │ │ └─runtime
             │ │ │   └─104501 /usr/bin/conmon --api-version 1 -c 91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28 -u 91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28/userdata -p /run/containers/storage/overlay-containers/91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-grafana-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@grafana.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28
             │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@haproxy.nfs.cephfs.compute-0.yeogal.service
             │ │ │ ├─libpod-payload-2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178
             │ │ │ │ ├─95467 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ │ │ │ ├─95469 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ │ │ └─95471 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ │ └─runtime
             │ │ │   └─95465 /usr/bin/conmon --api-version 1 -c 2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178 -u 2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178/userdata -p /run/containers/storage/overlay-containers/2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-haproxy-nfs-cephfs-compute-0-yeogal --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@haproxy.nfs.cephfs.compute-0.yeogal.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178
             │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@haproxy.rgw.default.compute-0.qabsws.service
             │ │ │ ├─libpod-payload-872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61
             │ │ │ │ ├─97806 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ │ │ │ ├─97808 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ │ │ └─97810 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ │ └─runtime
             │ │ │   └─97804 /usr/bin/conmon --api-version 1 -c 872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61 -u 872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61/userdata -p /run/containers/storage/overlay-containers/872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-haproxy-rgw-default-compute-0-qabsws --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@haproxy.rgw.default.compute-0.qabsws.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61
             │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@keepalived.nfs.cephfs.compute-0.lrsdkc.service
             │ │ │ ├─libpod-payload-4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609
             │ │ │ │ ├─96150 /run/podman-init -- ./init.sh
             │ │ │ │ ├─96152 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ │ │ └─96154 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ │ └─runtime
             │ │ │   └─96148 /usr/bin/conmon --api-version 1 -c 4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609 -u 4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609/userdata -p /run/containers/storage/overlay-containers/4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-nfs-cephfs-compute-0-lrsdkc --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@keepalived.nfs.cephfs.compute-0.lrsdkc.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609
             │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@keepalived.rgw.default.compute-0.tytkrd.service
             │ │ │ ├─libpod-payload-fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50
             │ │ │ │ ├─98096 /run/podman-init -- ./init.sh
             │ │ │ │ ├─98098 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ │ │ └─98100 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ │ └─runtime
             │ │ │   └─98094 /usr/bin/conmon --api-version 1 -c fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50 -u fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50/userdata -p /run/containers/storage/overlay-containers/fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-rgw-default-compute-0-tytkrd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@keepalived.rgw.default.compute-0.tytkrd.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50
             │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mds.cephfs.compute-0.ymknms.service
             │ │ │ ├─libpod-payload-e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093
             │ │ │ │ ├─94626 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.ymknms -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─94628 /usr/bin/ceph-mds -n mds.cephfs.compute-0.ymknms -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─94624 /usr/bin/conmon --api-version 1 -c e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093 -u e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093/userdata -p /run/containers/storage/overlay-containers/e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-mds-cephfs-compute-0-ymknms --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mds.cephfs.compute-0.ymknms.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093
             │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mgr.compute-0.nbdygh.service
             │ │ │ ├─libpod-payload-e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3
             │ │ │ │ ├─74631 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.nbdygh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─74633 /usr/bin/ceph-mgr -n mgr.compute-0.nbdygh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─74629 /usr/bin/conmon --api-version 1 -c e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3 -u e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3/userdata -p /run/containers/storage/overlay-containers/e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-mgr-compute-0-nbdygh --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mgr.compute-0.nbdygh.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3
             │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mon.compute-0.service
             │ │ │ ├─libpod-payload-cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6
             │ │ │ │ ├─74333 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ │ └─74335 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─74331 /usr/bin/conmon --api-version 1 -c cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6 -u cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6/userdata -p /run/containers/storage/overlay-containers/cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6
             │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@node-exporter.compute-0.service
             │ │ │ ├─libpod-payload-97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8
             │ │ │ │ ├─89474 /run/podman-init -- /bin/node_exporter --no-collector.timex --web.listen-address=:9100 --path.procfs=/host/proc --path.sysfs=/host/sys --path.rootfs=/rootfs
             │ │ │ │ └─89477 /bin/node_exporter --no-collector.timex --web.listen-address=:9100 --path.procfs=/host/proc --path.sysfs=/host/sys --path.rootfs=/rootfs
             │ │ │ └─runtime
             │ │ │   └─89471 /usr/bin/conmon --api-version 1 -c 97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8 -u 97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8/userdata -p /run/containers/storage/overlay-containers/97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-node-exporter-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@node-exporter.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8
             │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@osd.1.service
             │ │ │ ├─libpod-payload-ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b
             │ │ │ │ ├─82639 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─82641 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─82637 /usr/bin/conmon --api-version 1 -c ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b -u ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b/userdata -p /run/containers/storage/overlay-containers/ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b
             │ │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@prometheus.compute-0.service
             │ │ │ ├─libpod-payload-8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf
             │ │ │ │ ├─98643 /run/podman-init -- /bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=15d --storage.tsdb.retention.size=0 --web.external-url=http://compute-0.ctlplane.example.com:9095 --web.listen-address=192.168.122.100:9095
             │ │ │ │ └─98645 /bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=15d --storage.tsdb.retention.size=0 --web.external-url=http://compute-0.ctlplane.example.com:9095 --web.listen-address=192.168.122.100:9095
             │ │ │ └─runtime
             │ │ │   └─98641 /usr/bin/conmon --api-version 1 -c 8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf -u 8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf/userdata -p /run/containers/storage/overlay-containers/8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-prometheus-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@prometheus.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf
             │ │ └─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@rgw.rgw.compute-0.jbpfwf.service
             │ │   ├─libpod-payload-318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a
             │ │   │ ├─93746 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.jbpfwf -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │   │ └─93748 /usr/bin/radosgw -n client.rgw.rgw.compute-0.jbpfwf -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │   └─runtime
             │ │     └─93744 /usr/bin/conmon --api-version 1 -c 318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a -u 318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a/userdata -p /run/containers/storage/overlay-containers/318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-rgw-rgw-compute-0-jbpfwf --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@rgw.rgw.compute-0.jbpfwf.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a
             │ ├─system-getty.slice
             │ │ └─getty@tty1.service
             │ │   └─1009 /sbin/agetty -o "-p -- \\u" --noclear - linux
             │ ├─system-serial\x2dgetty.slice
             │ │ └─serial-getty@ttyS0.service
             │ │   └─1010 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             │ ├─systemd-hostnamed.service
             │ │ └─292378 /usr/lib/systemd/systemd-hostnamed
             │ ├─systemd-journald.service
             │ │ └─675 /usr/lib/systemd/systemd-journald
             │ ├─systemd-logind.service
             │ │ └─784 /usr/lib/systemd/systemd-logind
             │ ├─systemd-machined.service
             │ │ └─216411 /usr/lib/systemd/systemd-machined
             │ ├─systemd-udevd.service
             │ │ └─udev
             │ │   └─727 /usr/lib/systemd/systemd-udevd
             │ ├─tuned.service
             │ │ └─109791 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             │ ├─virtlogd.service
             │ │ └─215777 /usr/sbin/virtlogd
             │ ├─virtnodedevd.service
             │ │ └─248921 /usr/sbin/virtnodedevd --timeout 120
             │ └─virtqemud.service
             │   └─248554 /usr/sbin/virtqemud --timeout 120
             └─user.slice
               ├─user-1000.slice
               │ ├─session-1.scope
               │ │ └─4516 /usr/bin/python3
               │ ├─session-59.scope
               │ │ ├─289862 "sshd-session: zuul [priv]"
               │ │ ├─289865 "sshd-session: zuul@notty"
               │ │ ├─289866 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
               │ │ ├─289890 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
               │ │ ├─296659 timeout 15s turbostat --debug sleep 10
               │ │ ├─297031 timeout 300s systemctl status --all
               │ │ ├─297032 systemctl status --all
               │ │ ├─297060 timeout 300s ceph osd numa-status --format json-pretty
               │ │ └─297061 /usr/bin/python3 -s /usr/bin/ceph osd numa-status --format json-pretty
               │ └─user@1000.service
               │   ├─app.slice
               │   │ └─dbus-broker.service
               │   │   ├─11765 /usr/bin/dbus-broker-launch --scope user
               │   │   └─11778 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               │   ├─init.scope
               │   │ ├─4304 /usr/lib/systemd/systemd --user
               │   │ └─4306 "(sd-pam)"
               │   └─user.slice
               │     └─podman-pause-3184ab7f.scope
               │       └─11628 catatonit -P
               └─user-42477.slice
                 ├─session-39.scope
                 │ ├─99842 "sshd-session: ceph-admin [priv]"
                 │ └─99848 "sshd-session: ceph-admin@notty"
                 └─user@42477.service
                   └─init.scope
                     ├─90024 /usr/lib/systemd/systemd --user
                     └─90026 "(sd-pam)"

Jan 23 10:41:17 compute-0 systemd[1]: Started libcrun container.
Jan 23 10:41:17 compute-0 systemd[1]: libpod-08c67959b2d141ae7a57d37ece2da37ad07761fa59f0340f63f41eaed57d5855.scope: Deactivated successfully.
Jan 23 10:41:17 compute-0 systemd[1]: var-lib-containers-storage-overlay-2533e289941a47e639cd0db545742105d138d628a923353a5d3c86e91f47b4f0-merged.mount: Deactivated successfully.
Jan 23 10:41:17 compute-0 systemd[1]: libpod-conmon-08c67959b2d141ae7a57d37ece2da37ad07761fa59f0340f63f41eaed57d5855.scope: Deactivated successfully.
Jan 23 10:41:18 compute-0 systemd[1]: Started libpod-conmon-70935c21499685672b8e368e4d9f412081240a225439ed4442c8712ab1111167.scope.
Jan 23 10:41:18 compute-0 systemd[1]: Started libcrun container.
Jan 23 10:41:18 compute-0 systemd[1]: libpod-70935c21499685672b8e368e4d9f412081240a225439ed4442c8712ab1111167.scope: Deactivated successfully.
Jan 23 10:41:18 compute-0 systemd[1]: libpod-70935c21499685672b8e368e4d9f412081240a225439ed4442c8712ab1111167.scope: Consumed 1.129s CPU time.
Jan 23 10:41:19 compute-0 systemd[1]: var-lib-containers-storage-overlay-1da9cc6cfb41f2879c30509070e68c9ab54203f80ead794e613f67a7cf3a6d72-merged.mount: Deactivated successfully.
Jan 23 10:41:19 compute-0 systemd[1]: libpod-conmon-70935c21499685672b8e368e4d9f412081240a225439ed4442c8712ab1111167.scope: Deactivated successfully.

● machine.slice - Virtual Machine and Container Slice
     Loaded: loaded (/usr/lib/systemd/system/machine.slice; static)
     Active: active since Fri 2026-01-23 09:47:33 UTC; 53min ago
      Until: Fri 2026-01-23 09:47:33 UTC; 53min ago
       Docs: man:systemd.special(7)
         IO: 311.5M read, 59.2M written
      Tasks: 43
     Memory: 889.5M (peak: 1.1G)
        CPU: 4min 47.491s
     CGroup: /machine.slice
             ├─libpod-7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d.scope
             │ └─container
             │   ├─161918 dumb-init --single-child -- kolla_start
             │   ├─161921 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │   ├─162303 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │   ├─162436 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmpm91q9wva/privsep.sock
             │   ├─255218 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmp5z6r0kvj/privsep.sock
             │   └─255276 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpv8hp88b7/privsep.sock
             ├─libpod-955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f.scope
             │ └─container
             │   ├─249231 dumb-init --single-child -- kolla_start
             │   ├─249233 /usr/bin/python3 /usr/bin/nova-compute
             │   ├─255097 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmp_1x_jasi/privsep.sock
             │   └─255486 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmpsh92gw98/privsep.sock
             └─libpod-ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d.scope
               └─container
                 ├─151636 dumb-init --single-child -- kolla_start
                 └─151639 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt

Jan 23 10:41:16 compute-0 focused_euclid[294793]:                 "ceph.vdo": "0",
Jan 23 10:41:16 compute-0 focused_euclid[294793]:                 "ceph.with_tpm": "0"
Jan 23 10:41:16 compute-0 focused_euclid[294793]:             },
Jan 23 10:41:16 compute-0 focused_euclid[294793]:             "type": "block",
Jan 23 10:41:16 compute-0 focused_euclid[294793]:             "vg_name": "ceph_vg0"
Jan 23 10:41:16 compute-0 focused_euclid[294793]:         }
Jan 23 10:41:16 compute-0 focused_euclid[294793]:     ]
Jan 23 10:41:16 compute-0 focused_euclid[294793]: }
Jan 23 10:41:17 compute-0 agitated_ishizaka[295377]: 167 167
Jan 23 10:41:18 compute-0 compassionate_solomon[295535]: {}

● system-ceph\x2df3005f84\x2d239a\x2d55b6\x2da948\x2d8f1fb592b920.slice - Slice /system/ceph-f3005f84-239a-55b6-a948-8f1fb592b920
     Loaded: loaded
     Active: active since Fri 2026-01-23 09:47:37 UTC; 53min ago
      Until: Fri 2026-01-23 09:47:37 UTC; 53min ago
         IO: 220.9M read, 2.3G written
      Tasks: 983
     Memory: 1.6G (peak: 1.6G)
        CPU: 6min 25.829s
     CGroup: /system.slice/system-ceph\x2df3005f84\x2d239a\x2d55b6\x2da948\x2d8f1fb592b920.slice
             ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@alertmanager.compute-0.service
             │ ├─libpod-payload-a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982
             │ │ ├─104187 /run/podman-init -- /bin/alertmanager --cluster.listen-address=:9094 --web.listen-address=192.168.122.100:9093 --config.file=/etc/alertmanager/alertmanager.yml
             │ │ └─104189 /bin/alertmanager --cluster.listen-address=:9094 --web.listen-address=192.168.122.100:9093 --config.file=/etc/alertmanager/alertmanager.yml
             │ └─runtime
             │   └─104185 /usr/bin/conmon --api-version 1 -c a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982 -u a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982/userdata -p /run/containers/storage/overlay-containers/a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-alertmanager-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@alertmanager.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982
             ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@crash.compute-0.service
             │ ├─libpod-payload-ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21
             │ │ ├─79596 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ └─79598 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ └─runtime
             │   └─79594 /usr/bin/conmon --api-version 1 -c ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21 -u ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21/userdata -p /run/containers/storage/overlay-containers/ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21
             ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@grafana.compute-0.service
             │ ├─libpod-payload-91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28
             │ │ ├─104505 /run/podman-init -- /run.sh
             │ │ └─104507 grafana server --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini --packaging=docker cfg:default.log.mode=console cfg:default.paths.data=/var/lib/grafana cfg:default.paths.logs=/var/log/grafana cfg:default.paths.plugins=/var/lib/grafana/plugins cfg:default.paths.provisioning=/etc/grafana/provisioning
             │ └─runtime
             │   └─104501 /usr/bin/conmon --api-version 1 -c 91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28 -u 91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28/userdata -p /run/containers/storage/overlay-containers/91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-grafana-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@grafana.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28
             ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@haproxy.nfs.cephfs.compute-0.yeogal.service
             │ ├─libpod-payload-2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178
             │ │ ├─95467 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ │ ├─95469 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ └─95471 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ └─runtime
             │   └─95465 /usr/bin/conmon --api-version 1 -c 2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178 -u 2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178/userdata -p /run/containers/storage/overlay-containers/2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-haproxy-nfs-cephfs-compute-0-yeogal --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@haproxy.nfs.cephfs.compute-0.yeogal.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178
             ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@haproxy.rgw.default.compute-0.qabsws.service
             │ ├─libpod-payload-872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61
             │ │ ├─97806 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ │ ├─97808 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ └─97810 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ └─runtime
             │   └─97804 /usr/bin/conmon --api-version 1 -c 872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61 -u 872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61/userdata -p /run/containers/storage/overlay-containers/872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-haproxy-rgw-default-compute-0-qabsws --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@haproxy.rgw.default.compute-0.qabsws.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61
             ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@keepalived.nfs.cephfs.compute-0.lrsdkc.service
             │ ├─libpod-payload-4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609
             │ │ ├─96150 /run/podman-init -- ./init.sh
             │ │ ├─96152 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ └─96154 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ └─runtime
             │   └─96148 /usr/bin/conmon --api-version 1 -c 4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609 -u 4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609/userdata -p /run/containers/storage/overlay-containers/4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-nfs-cephfs-compute-0-lrsdkc --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@keepalived.nfs.cephfs.compute-0.lrsdkc.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609
             ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@keepalived.rgw.default.compute-0.tytkrd.service
             │ ├─libpod-payload-fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50
             │ │ ├─98096 /run/podman-init -- ./init.sh
             │ │ ├─98098 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ └─98100 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ └─runtime
             │   └─98094 /usr/bin/conmon --api-version 1 -c fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50 -u fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50/userdata -p /run/containers/storage/overlay-containers/fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-rgw-default-compute-0-tytkrd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@keepalived.rgw.default.compute-0.tytkrd.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50
             ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mds.cephfs.compute-0.ymknms.service
             │ ├─libpod-payload-e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093
             │ │ ├─94626 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.ymknms -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─94628 /usr/bin/ceph-mds -n mds.cephfs.compute-0.ymknms -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─94624 /usr/bin/conmon --api-version 1 -c e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093 -u e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093/userdata -p /run/containers/storage/overlay-containers/e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-mds-cephfs-compute-0-ymknms --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mds.cephfs.compute-0.ymknms.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093
             ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mgr.compute-0.nbdygh.service
             │ ├─libpod-payload-e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3
             │ │ ├─74631 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.nbdygh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─74633 /usr/bin/ceph-mgr -n mgr.compute-0.nbdygh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─74629 /usr/bin/conmon --api-version 1 -c e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3 -u e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3/userdata -p /run/containers/storage/overlay-containers/e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-mgr-compute-0-nbdygh --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mgr.compute-0.nbdygh.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3
             ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mon.compute-0.service
             │ ├─libpod-payload-cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6
             │ │ ├─74333 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─74335 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─runtime
             │   └─74331 /usr/bin/conmon --api-version 1 -c cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6 -u cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6/userdata -p /run/containers/storage/overlay-containers/cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6
             ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@node-exporter.compute-0.service
             │ ├─libpod-payload-97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8
             │ │ ├─89474 /run/podman-init -- /bin/node_exporter --no-collector.timex --web.listen-address=:9100 --path.procfs=/host/proc --path.sysfs=/host/sys --path.rootfs=/rootfs
             │ │ └─89477 /bin/node_exporter --no-collector.timex --web.listen-address=:9100 --path.procfs=/host/proc --path.sysfs=/host/sys --path.rootfs=/rootfs
             │ └─runtime
             │   └─89471 /usr/bin/conmon --api-version 1 -c 97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8 -u 97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8/userdata -p /run/containers/storage/overlay-containers/97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-node-exporter-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@node-exporter.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8
             ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@osd.1.service
             │ ├─libpod-payload-ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b
             │ │ ├─82639 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─82641 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─82637 /usr/bin/conmon --api-version 1 -c ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b -u ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b/userdata -p /run/containers/storage/overlay-containers/ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b
             ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@prometheus.compute-0.service
             │ ├─libpod-payload-8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf
             │ │ ├─98643 /run/podman-init -- /bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=15d --storage.tsdb.retention.size=0 --web.external-url=http://compute-0.ctlplane.example.com:9095 --web.listen-address=192.168.122.100:9095
             │ │ └─98645 /bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=15d --storage.tsdb.retention.size=0 --web.external-url=http://compute-0.ctlplane.example.com:9095 --web.listen-address=192.168.122.100:9095
             │ └─runtime
             │   └─98641 /usr/bin/conmon --api-version 1 -c 8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf -u 8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf/userdata -p /run/containers/storage/overlay-containers/8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-prometheus-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@prometheus.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf
             └─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@rgw.rgw.compute-0.jbpfwf.service
               ├─libpod-payload-318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a
               │ ├─93746 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.jbpfwf -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
               │ └─93748 /usr/bin/radosgw -n client.rgw.rgw.compute-0.jbpfwf -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
               └─runtime
                 └─93744 /usr/bin/conmon --api-version 1 -c 318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a -u 318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a/userdata -p /run/containers/storage/overlay-containers/318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-rgw-rgw-compute-0-jbpfwf --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@rgw.rgw.compute-0.jbpfwf.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a

Jan 23 10:41:26 compute-0 radosgw[93748]: ====== starting new request req=0x7fa5c588c5d0 =====
Jan 23 10:41:26 compute-0 radosgw[93748]: ====== req done req=0x7fa5c588c5d0 op status=0 http_status=200 latency=0.000000000s ======
Jan 23 10:41:26 compute-0 radosgw[93748]: beast: 0x7fa5c588c5d0: 192.168.122.102 - anonymous [23/Jan/2026:10:41:26.459 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.000000000s
Jan 23 10:41:26 compute-0 ceph-mon[74335]: mon.compute-0@0(leader) e3 handle_command mon_command({"prefix": "osd dump", "format": "json-pretty"} v 0)
Jan 23 10:41:26 compute-0 ceph-mon[74335]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/98296209' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json-pretty"}]: dispatch
Jan 23 10:41:26 compute-0 ceph-mgr[74633]: log_channel(audit) log [DBG] : from='client.28090 -' entity='client.admin' cmd=[{"prefix": "pg stat", "target": ["mon-mgr", ""]}]: dispatch
Jan 23 10:41:26 compute-0 ceph-mon[74335]: from='client.27728 -' entity='client.admin' cmd=[{"prefix": "pg stat", "target": ["mon-mgr", ""]}]: dispatch
Jan 23 10:41:26 compute-0 ceph-mon[74335]: from='client.18573 -' entity='client.admin' cmd=[{"prefix": "osd blocked-by", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Jan 23 10:41:26 compute-0 ceph-mon[74335]: from='client.? 192.168.122.102:0/26184416' entity='client.admin' cmd=[{"prefix": "osd stat"}]: dispatch
Jan 23 10:41:26 compute-0 ceph-mon[74335]: from='client.? 192.168.122.100:0/467195774' entity='client.admin' cmd=[{"prefix": "osd blocklist ls", "format": "json-pretty"}]: dispatch

● system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice - Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged
     Loaded: loaded
     Active: active since Fri 2026-01-23 10:07:15 UTC; 34min ago
      Until: Fri 2026-01-23 10:07:15 UTC; 34min ago
         IO: 4.0K read, 0B written
      Tasks: 0
     Memory: 8.0K (peak: 58.6M)
        CPU: 1.004s
     CGroup: /system.slice/system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice

Jan 23 10:07:15 compute-0 systemd[1]: Created slice Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged.

● system-getty.slice - Slice /system/getty
     Loaded: loaded
     Active: active since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 296.0K (peak: 776.0K)
        CPU: 10ms
     CGroup: /system.slice/system-getty.slice
             └─getty@tty1.service
               └─1009 /sbin/agetty -o "-p -- \\u" --noclear - linux

● system-modprobe.slice - Slice /system/modprobe
     Loaded: loaded
     Active: active since Fri 2026-01-23 09:01:20 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:20 UTC; 1h 40min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 52.0K (peak: 11.6M)
        CPU: 150ms
     CGroup: /system.slice/system-modprobe.slice

Jan 23 09:01:20 localhost systemd[1]: Created slice Slice /system/modprobe.

● system-serial\x2dgetty.slice - Slice /system/serial-getty
     Loaded: loaded
     Active: active since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 264.0K (peak: 520.0K)
        CPU: 9ms
     CGroup: /system.slice/system-serial\x2dgetty.slice
             └─serial-getty@ttyS0.service
               └─1010 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

● system-sshd\x2dkeygen.slice - Slice /system/sshd-keygen
     Loaded: loaded
     Active: active since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 0B (peak: 0B)
        CPU: 0
     CGroup: /system.slice/system-sshd\x2dkeygen.slice

● system-systemd\x2dcoredump.slice - Slice /system/systemd-coredump
     Loaded: loaded
     Active: active since Fri 2026-01-23 09:54:11 UTC; 47min ago
      Until: Fri 2026-01-23 09:54:11 UTC; 47min ago
         IO: 1.1M read, 825.2M written
      Tasks: 0
     Memory: 7.1M (peak: 456.1M)
        CPU: 18.172s
     CGroup: /system.slice/system-systemd\x2dcoredump.slice

Jan 23 09:56:17 compute-0 systemd-coredump[112194]: Process 108672 (ganesha.nfsd) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 43:
                                                    #0  0x00007f63229e332e n/a (/usr/lib64/libntirpc.so.5.8 + 0x2232e)
                                                    ELF object binary architecture: AMD x86-64
Jan 23 10:03:48 compute-0 systemd-coredump[168924]: Process 114206 (ganesha.nfsd) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 79:
                                                    #0  0x00007f7f2768332e n/a (/usr/lib64/libntirpc.so.5.8 + 0x2232e)
                                                    #1  0x0000000000000000 n/a (n/a + 0x0)
                                                    #2  0x00007f7f2768d900 n/a (/usr/lib64/libntirpc.so.5.8 + 0x2c900)
                                                    ELF object binary architecture: AMD x86-64
Jan 23 10:05:13 compute-0 systemd-coredump[182441]: Process 169790 (ganesha.nfsd) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 53:
                                                    #0  0x00007f6c001f332e n/a (/usr/lib64/libntirpc.so.5.8 + 0x2232e)
                                                    ELF object binary architecture: AMD x86-64
Jan 23 10:06:47 compute-0 systemd-coredump[208577]: Process 188004 (ganesha.nfsd) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 60:
                                                    #0  0x00007fc521f3a32e n/a (/usr/lib64/libntirpc.so.5.8 + 0x2232e)
                                                    ELF object binary architecture: AMD x86-64
Jan 23 10:12:24 compute-0 systemd-coredump[250662]: Process 212765 (ganesha.nfsd) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 67:
                                                    #0  0x00007f1bf4dc432e n/a (/usr/lib64/libntirpc.so.5.8 + 0x2232e)
                                                    ELF object binary architecture: AMD x86-64
Jan 23 10:16:08 compute-0 systemd-coredump[254586]: Process 251500 (ganesha.nfsd) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 57:
                                                    #0  0x00007f5efbec632e n/a (/usr/lib64/libntirpc.so.5.8 + 0x2232e)
                                                    ELF object binary architecture: AMD x86-64
Jan 23 10:18:02 compute-0 systemd-coredump[257408]: Process 254785 (ganesha.nfsd) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 46:
                                                    #0  0x00007fb4749b532e n/a (/usr/lib64/libntirpc.so.5.8 + 0x2232e)
                                                    ELF object binary architecture: AMD x86-64
Jan 23 10:18:42 compute-0 systemd-coredump[258419]: Process 258018 (ganesha.nfsd) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 53:
                                                    #0  0x00007fde48ab732e n/a (/usr/lib64/libntirpc.so.5.8 + 0x2232e)
                                                    ELF object binary architecture: AMD x86-64
Jan 23 10:20:55 compute-0 systemd-coredump[262441]: Process 258621 (ganesha.nfsd) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 57:
                                                    #0  0x00007fb84d64132e n/a (/usr/lib64/libntirpc.so.5.8 + 0x2232e)
                                                    ELF object binary architecture: AMD x86-64
Jan 23 10:25:43 compute-0 systemd-coredump[267487]: Process 262786 (ganesha.nfsd) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 74:
                                                    #0  0x00007fe6d8f8832e n/a (/usr/lib64/libntirpc.so.5.8 + 0x2232e)
                                                    ELF object binary architecture: AMD x86-64

● system.slice - System Slice
     Loaded: loaded
     Active: active since Fri 2026-01-23 09:01:19 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:19 UTC; 1h 40min ago
       Docs: man:systemd.special(7)
         IO: 390.5M read, 3.2G written
      Tasks: 1096
     Memory: 2.3G (peak: 2.8G)
        CPU: 11min 14.280s
     CGroup: /system.slice
             ├─NetworkManager.service
             │ └─48866 /usr/sbin/NetworkManager --no-daemon
             ├─auditd.service
             │ ├─699 /sbin/auditd
             │ └─701 /usr/sbin/sedispatch
             ├─chronyd.service
             │ └─58433 /usr/sbin/chronyd -F 2
             ├─crond.service
             │ ├─  1008 /usr/sbin/crond -n
             │ └─147517 /usr/sbin/anacron -s
             ├─dbus-broker.service
             │ ├─765 /usr/bin/dbus-broker-launch --scope system --audit
             │ └─769 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             ├─edpm_nova_compute.service
             │ └─249229 /usr/bin/conmon --api-version 1 -c 955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f -u 955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f/userdata -p /run/containers/storage/overlay-containers/955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 955f9566d05798cdd71546732c1b5a8107d1262ca33e1e62353f97ae0dc2cc2f
             ├─edpm_ovn_controller.service
             │ └─151634 /usr/bin/conmon --api-version 1 -c ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d -u ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d/userdata -p /run/containers/storage/overlay-containers/ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d
             ├─edpm_ovn_metadata_agent.service
             │ └─161916 /usr/bin/conmon --api-version 1 -c 7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d -u 7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d/userdata -p /run/containers/storage/overlay-containers/7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d
             ├─gssproxy.service
             │ └─868 /usr/sbin/gssproxy -D
             ├─irqbalance.service
             │ └─779 /usr/sbin/irqbalance
             ├─iscsid.service
             │ └─233083 /usr/sbin/iscsid -f
             ├─multipathd.service
             │ └─233315 /sbin/multipathd -d -s
             ├─ovs-vswitchd.service
             │ └─47171 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             ├─ovsdb-server.service
             │ └─47090 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             ├─polkit.service
             │ └─43358 /usr/lib/polkit-1/polkitd --no-debug
             ├─rpcbind.service
             │ └─697 /usr/bin/rpcbind -w -f
             ├─rsyslog.service
             │ └─1003 /usr/sbin/rsyslogd -n
             ├─sshd.service
             │ └─189686 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             ├─system-ceph\x2df3005f84\x2d239a\x2d55b6\x2da948\x2d8f1fb592b920.slice
             │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@alertmanager.compute-0.service
             │ │ ├─libpod-payload-a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982
             │ │ │ ├─104187 /run/podman-init -- /bin/alertmanager --cluster.listen-address=:9094 --web.listen-address=192.168.122.100:9093 --config.file=/etc/alertmanager/alertmanager.yml
             │ │ │ └─104189 /bin/alertmanager --cluster.listen-address=:9094 --web.listen-address=192.168.122.100:9093 --config.file=/etc/alertmanager/alertmanager.yml
             │ │ └─runtime
             │ │   └─104185 /usr/bin/conmon --api-version 1 -c a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982 -u a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982/userdata -p /run/containers/storage/overlay-containers/a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-alertmanager-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@alertmanager.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg a2ddeb968d99b1961970b788dda423e2c0177d966be5d6216090bc7f97658982
             │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@crash.compute-0.service
             │ │ ├─libpod-payload-ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21
             │ │ │ ├─79596 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ └─79598 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ └─runtime
             │ │   └─79594 /usr/bin/conmon --api-version 1 -c ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21 -u ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21/userdata -p /run/containers/storage/overlay-containers/ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ae2342c943dc0b4633eaeef8f7726de29e0287eb0b10e37d37a519b117896a21
             │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@grafana.compute-0.service
             │ │ ├─libpod-payload-91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28
             │ │ │ ├─104505 /run/podman-init -- /run.sh
             │ │ │ └─104507 grafana server --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini --packaging=docker cfg:default.log.mode=console cfg:default.paths.data=/var/lib/grafana cfg:default.paths.logs=/var/log/grafana cfg:default.paths.plugins=/var/lib/grafana/plugins cfg:default.paths.provisioning=/etc/grafana/provisioning
             │ │ └─runtime
             │ │   └─104501 /usr/bin/conmon --api-version 1 -c 91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28 -u 91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28/userdata -p /run/containers/storage/overlay-containers/91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-grafana-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@grafana.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 91a745e69178e4c0b1322185fa504c92fadf052d1491b19cbcad743ff263de28
             │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@haproxy.nfs.cephfs.compute-0.yeogal.service
             │ │ ├─libpod-payload-2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178
             │ │ │ ├─95467 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ │ │ ├─95469 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ │ └─95471 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ └─runtime
             │ │   └─95465 /usr/bin/conmon --api-version 1 -c 2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178 -u 2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178/userdata -p /run/containers/storage/overlay-containers/2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-haproxy-nfs-cephfs-compute-0-yeogal --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@haproxy.nfs.cephfs.compute-0.yeogal.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2675dd2af0d87249968094bd6c1eb5d25ac7173a5fd992ed1bd309216a505178
             │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@haproxy.rgw.default.compute-0.qabsws.service
             │ │ ├─libpod-payload-872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61
             │ │ │ ├─97806 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ │ │ ├─97808 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ │ └─97810 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ └─runtime
             │ │   └─97804 /usr/bin/conmon --api-version 1 -c 872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61 -u 872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61/userdata -p /run/containers/storage/overlay-containers/872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-haproxy-rgw-default-compute-0-qabsws --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@haproxy.rgw.default.compute-0.qabsws.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 872bb66aeaa09288ead0a99e17c29682960b011c0b0f7af2b0513c1ab79aba61
             │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@keepalived.nfs.cephfs.compute-0.lrsdkc.service
             │ │ ├─libpod-payload-4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609
             │ │ │ ├─96150 /run/podman-init -- ./init.sh
             │ │ │ ├─96152 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ │ └─96154 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ └─runtime
             │ │   └─96148 /usr/bin/conmon --api-version 1 -c 4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609 -u 4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609/userdata -p /run/containers/storage/overlay-containers/4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-nfs-cephfs-compute-0-lrsdkc --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@keepalived.nfs.cephfs.compute-0.lrsdkc.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4783fa4a0e03b39b894c380a1696ad8cf3e72c4e94fd2480817944beb7891609
             │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@keepalived.rgw.default.compute-0.tytkrd.service
             │ │ ├─libpod-payload-fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50
             │ │ │ ├─98096 /run/podman-init -- ./init.sh
             │ │ │ ├─98098 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ │ └─98100 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ └─runtime
             │ │   └─98094 /usr/bin/conmon --api-version 1 -c fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50 -u fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50/userdata -p /run/containers/storage/overlay-containers/fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-keepalived-rgw-default-compute-0-tytkrd --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@keepalived.rgw.default.compute-0.tytkrd.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg fb69e42829d45e3b674ff9bd3f3333c8c90dc07a3801eda65c7a0ef9a0f84b50
             │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mds.cephfs.compute-0.ymknms.service
             │ │ ├─libpod-payload-e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093
             │ │ │ ├─94626 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.ymknms -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─94628 /usr/bin/ceph-mds -n mds.cephfs.compute-0.ymknms -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─94624 /usr/bin/conmon --api-version 1 -c e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093 -u e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093/userdata -p /run/containers/storage/overlay-containers/e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-mds-cephfs-compute-0-ymknms --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mds.cephfs.compute-0.ymknms.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e4542c7adce0f518b7f99d99679470337d13a224c21a637a7c2e54819c64d093
             │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mgr.compute-0.nbdygh.service
             │ │ ├─libpod-payload-e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3
             │ │ │ ├─74631 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.nbdygh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─74633 /usr/bin/ceph-mgr -n mgr.compute-0.nbdygh -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─74629 /usr/bin/conmon --api-version 1 -c e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3 -u e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3/userdata -p /run/containers/storage/overlay-containers/e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-mgr-compute-0-nbdygh --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mgr.compute-0.nbdygh.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg e4a1c45f747e69af65041011d00875cfaaf16149f31875bd1585747dd24058b3
             │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mon.compute-0.service
             │ │ ├─libpod-payload-cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6
             │ │ │ ├─74333 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─74335 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─runtime
             │ │   └─74331 /usr/bin/conmon --api-version 1 -c cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6 -u cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6/userdata -p /run/containers/storage/overlay-containers/cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg cbfd7f9a2ad9887ed3adf829b401ebf60f670e8aa91916ede409f75a12aeb3e6
             │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@node-exporter.compute-0.service
             │ │ ├─libpod-payload-97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8
             │ │ │ ├─89474 /run/podman-init -- /bin/node_exporter --no-collector.timex --web.listen-address=:9100 --path.procfs=/host/proc --path.sysfs=/host/sys --path.rootfs=/rootfs
             │ │ │ └─89477 /bin/node_exporter --no-collector.timex --web.listen-address=:9100 --path.procfs=/host/proc --path.sysfs=/host/sys --path.rootfs=/rootfs
             │ │ └─runtime
             │ │   └─89471 /usr/bin/conmon --api-version 1 -c 97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8 -u 97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8/userdata -p /run/containers/storage/overlay-containers/97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-node-exporter-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@node-exporter.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 97848d12ab6322cfe3cc805f7972048af088977ee2e693bcc0a5bb581613a0d8
             │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@osd.1.service
             │ │ ├─libpod-payload-ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b
             │ │ │ ├─82639 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─82641 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─82637 /usr/bin/conmon --api-version 1 -c ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b -u ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b/userdata -p /run/containers/storage/overlay-containers/ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ba38de35226506cb699780f729ec895e86e90cec52f99c13abc1fc038212a39b
             │ ├─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@prometheus.compute-0.service
             │ │ ├─libpod-payload-8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf
             │ │ │ ├─98643 /run/podman-init -- /bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=15d --storage.tsdb.retention.size=0 --web.external-url=http://compute-0.ctlplane.example.com:9095 --web.listen-address=192.168.122.100:9095
             │ │ │ └─98645 /bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=15d --storage.tsdb.retention.size=0 --web.external-url=http://compute-0.ctlplane.example.com:9095 --web.listen-address=192.168.122.100:9095
             │ │ └─runtime
             │ │   └─98641 /usr/bin/conmon --api-version 1 -c 8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf -u 8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf/userdata -p /run/containers/storage/overlay-containers/8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-prometheus-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@prometheus.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 8d18c97a753c8610913f4f1be41e91ee0fa0045d3fad5bb17483ebd74168eedf
             │ └─ceph-f3005f84-239a-55b6-a948-8f1fb592b920@rgw.rgw.compute-0.jbpfwf.service
             │   ├─libpod-payload-318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a
             │   │ ├─93746 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.jbpfwf -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │   │ └─93748 /usr/bin/radosgw -n client.rgw.rgw.compute-0.jbpfwf -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │   └─runtime
             │     └─93744 /usr/bin/conmon --api-version 1 -c 318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a -u 318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a/userdata -p /run/containers/storage/overlay-containers/318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a/userdata/pidfile -n ceph-f3005f84-239a-55b6-a948-8f1fb592b920-rgw-rgw-compute-0-jbpfwf --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a/userdata/oci-log --conmon-pidfile /run/ceph-f3005f84-239a-55b6-a948-8f1fb592b920@rgw.rgw.compute-0.jbpfwf.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 318a53d5c542e8639ae8cbe910fcc9d6c8a7c7006a978c4655d1d2582222973a
             ├─system-getty.slice
             │ └─getty@tty1.service
             │   └─1009 /sbin/agetty -o "-p -- \\u" --noclear - linux
             ├─system-serial\x2dgetty.slice
             │ └─serial-getty@ttyS0.service
             │   └─1010 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             ├─systemd-hostnamed.service
             │ └─292378 /usr/lib/systemd/systemd-hostnamed
             ├─systemd-journald.service
             │ └─675 /usr/lib/systemd/systemd-journald
             ├─systemd-logind.service
             │ └─784 /usr/lib/systemd/systemd-logind
             ├─systemd-machined.service
             │ └─216411 /usr/lib/systemd/systemd-machined
             ├─systemd-udevd.service
             │ └─udev
             │   └─727 /usr/lib/systemd/systemd-udevd
             ├─tuned.service
             │ └─109791 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             ├─virtlogd.service
             │ └─215777 /usr/sbin/virtlogd
             ├─virtnodedevd.service
             │ └─248921 /usr/sbin/virtnodedevd --timeout 120
             └─virtqemud.service
               └─248554 /usr/sbin/virtqemud --timeout 120

Jan 23 10:41:21 compute-0 nova_compute[249229]: 2026-01-23 10:41:21.718 249233 DEBUG nova.compute.manager [None req-0623b36c-8378-484d-a465-75209495a966 - - - - - -] Starting heal instance info cache _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9858[00m
Jan 23 10:41:21 compute-0 nova_compute[249229]: 2026-01-23 10:41:21.718 249233 DEBUG nova.compute.manager [None req-0623b36c-8378-484d-a465-75209495a966 - - - - - -] Rebuilding the list of instances to heal _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9862[00m
Jan 23 10:41:21 compute-0 nova_compute[249229]: 2026-01-23 10:41:21.963 249233 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Jan 23 10:41:22 compute-0 nova_compute[249229]: 2026-01-23 10:41:22.106 249233 DEBUG nova.compute.manager [None req-0623b36c-8378-484d-a465-75209495a966 - - - - - -] Didn't find any instances for network info cache update. _heal_instance_info_cache /usr/lib/python3.9/site-packages/nova/compute/manager.py:9944[00m
Jan 23 10:41:22 compute-0 nova_compute[249229]: 2026-01-23 10:41:22.716 249233 DEBUG oslo_service.periodic_task [None req-0623b36c-8378-484d-a465-75209495a966 - - - - - -] Running periodic task ComputeManager._reclaim_queued_deletes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Jan 23 10:41:22 compute-0 nova_compute[249229]: 2026-01-23 10:41:22.717 249233 DEBUG nova.compute.manager [None req-0623b36c-8378-484d-a465-75209495a966 - - - - - -] CONF.reclaim_instance_interval <= 0, skipping... _reclaim_queued_deletes /usr/lib/python3.9/site-packages/nova/compute/manager.py:10477[00m
Jan 23 10:41:24 compute-0 nova_compute[249229]: 2026-01-23 10:41:24.136 249233 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Jan 23 10:41:24 compute-0 nova_compute[249229]: 2026-01-23 10:41:24.717 249233 DEBUG oslo_service.periodic_task [None req-0623b36c-8378-484d-a465-75209495a966 - - - - - -] Running periodic task ComputeManager._poll_unconfirmed_resizes run_periodic_tasks /usr/lib/python3.9/site-packages/oslo_service/periodic_task.py:210[00m
Jan 23 10:41:26 compute-0 podman[297066]: 2026-01-23 10:41:26.663277852 +0000 UTC m=+0.076979937 container health_status 7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:85729a662800e6b42ceb088545fed39a2ac58704b4a37fd540cdef3ebf9e59a2, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.schema-version=1.0, container_name=ovn_metadata_agent, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, tcib_managed=true, config_id=ovn_metadata_agent, org.label-schema.build-date=20251202, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': 'cdc8d10f0e05d8a70b43cf26938a886cf76be4340fa6a898edc4cc90e10001b1-99269140098de15b48680c41e5313433c184a4380a28a4d66e6de0ece8f46703-99269140098de15b48680c41e5313433c184a4380a28a4d66e6de0ece8f46703-99269140098de15b48680c41e5313433c184a4380a28a4d66e6de0ece8f46703-0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:85729a662800e6b42ceb088545fed39a2ac58704b4a37fd540cdef3ebf9e59a2', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/openstack/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', 
'/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)
Jan 23 10:41:26 compute-0 nova_compute[249229]: 2026-01-23 10:41:26.964 249233 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m

● user-1000.slice - User Slice of UID 1000
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Fri 2026-01-23 09:01:44 UTC; 1h 39min ago
      Until: Fri 2026-01-23 09:01:44 UTC; 1h 39min ago
       Docs: man:user@.service(5)
         IO: 457.8M read, 8.7G written
      Tasks: 33 (limit: 20031)
     Memory: 3.1G (peak: 4.0G)
        CPU: 25min 42.785s
     CGroup: /user.slice/user-1000.slice
             ├─session-1.scope
             │ └─4516 /usr/bin/python3
             ├─session-59.scope
             │ ├─289862 "sshd-session: zuul [priv]"
             │ ├─289865 "sshd-session: zuul@notty"
             │ ├─289866 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ ├─289890 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ ├─296659 timeout 15s turbostat --debug sleep 10
             │ ├─297031 timeout 300s systemctl status --all
             │ ├─297032 systemctl status --all
             │ ├─297060 timeout 300s ceph osd numa-status --format json-pretty
             │ ├─297061 /usr/bin/python3 -s /usr/bin/ceph osd numa-status --format json-pretty
             │ ├─297112 timeout 300s tuned-adm recommend
             │ └─297113 /usr/bin/python3 -Es /usr/sbin/tuned-adm recommend
             └─user@1000.service
               ├─app.slice
               │ └─dbus-broker.service
               │   ├─11765 /usr/bin/dbus-broker-launch --scope user
               │   └─11778 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               ├─init.scope
               │ ├─4304 /usr/lib/systemd/systemd --user
               │ └─4306 "(sd-pam)"
               └─user.slice
                 └─podman-pause-3184ab7f.scope
                   └─11628 catatonit -P

Jan 23 10:31:44 compute-0 sudo[281116]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Jan 23 10:31:44 compute-0 sudo[281116]: pam_unix(sudo:session): session closed for user root
Jan 23 10:31:44 compute-0 sshd-session[281115]: Received disconnect from 192.168.122.10 port 33234:11: disconnected by user
Jan 23 10:31:44 compute-0 sshd-session[281115]: Disconnected from user zuul 192.168.122.10 port 33234
Jan 23 10:40:43 compute-0 sudo[289866]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Jan 23 10:40:43 compute-0 sudo[289866]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Jan 23 10:40:50 compute-0 ovs-vsctl[290219]: ovs|00001|db_ctl_base|ERR|no key "dpdk-init" in Open_vSwitch record "." column other_config
Jan 23 10:41:17 compute-0 ovs-appctl[295110]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Jan 23 10:41:17 compute-0 ovs-appctl[295126]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Jan 23 10:41:17 compute-0 ovs-appctl[295133]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

● user-42477.slice - User Slice of UID 42477
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Fri 2026-01-23 09:51:33 UTC; 49min ago
      Until: Fri 2026-01-23 09:51:33 UTC; 49min ago
       Docs: man:user@.service(5)
         IO: 764.0K read, 1.2G written
      Tasks: 4 (limit: 20031)
     Memory: 243.9M (peak: 1.2G)
        CPU: 4min 25.199s
     CGroup: /user.slice/user-42477.slice
             ├─session-39.scope
             │ ├─99842 "sshd-session: ceph-admin [priv]"
             │ └─99848 "sshd-session: ceph-admin@notty"
             └─user@42477.service
               └─init.scope
                 ├─90024 /usr/lib/systemd/systemd --user
                 └─90026 "(sd-pam)"

Jan 23 10:41:18 compute-0 podman[295489]: 2026-01-23 10:41:18.037797664 +0000 UTC m=+0.023774503 image pull aade1b12b8e6196a39b8c83a7f707419487931732368729477a8c2bbcbca1d7c quay.io/ceph/ceph@sha256:7c69e59beaeea61ca714e71cb84ff6d5e533db7f1fd84143dd9ba6649a5fd2ec
Jan 23 10:41:18 compute-0 podman[295489]: 2026-01-23 10:41:18.161610781 +0000 UTC m=+0.147587640 container init 70935c21499685672b8e368e4d9f412081240a225439ed4442c8712ab1111167 (image=quay.io/ceph/ceph@sha256:7c69e59beaeea61ca714e71cb84ff6d5e533db7f1fd84143dd9ba6649a5fd2ec, name=compassionate_solomon, io.buildah.version=1.40.1, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=c92aebb279828e9c3c1f5d24613efca272649e62, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.build-date=20250325, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, ceph=True, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/)
Jan 23 10:41:18 compute-0 podman[295489]: 2026-01-23 10:41:18.172169193 +0000 UTC m=+0.158146012 container start 70935c21499685672b8e368e4d9f412081240a225439ed4442c8712ab1111167 (image=quay.io/ceph/ceph@sha256:7c69e59beaeea61ca714e71cb84ff6d5e533db7f1fd84143dd9ba6649a5fd2ec, name=compassionate_solomon, org.label-schema.vendor=CentOS, CEPH_SHA1=c92aebb279828e9c3c1f5d24613efca272649e62, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=squid, org.label-schema.schema-version=1.0, io.buildah.version=1.40.1, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20250325, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Jan 23 10:41:18 compute-0 podman[295489]: 2026-01-23 10:41:18.175831238 +0000 UTC m=+0.161808087 container attach 70935c21499685672b8e368e4d9f412081240a225439ed4442c8712ab1111167 (image=quay.io/ceph/ceph@sha256:7c69e59beaeea61ca714e71cb84ff6d5e533db7f1fd84143dd9ba6649a5fd2ec, name=compassionate_solomon, io.buildah.version=1.40.1, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=c92aebb279828e9c3c1f5d24613efca272649e62, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_REF=squid, org.label-schema.build-date=20250325, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git)
Jan 23 10:41:18 compute-0 podman[295489]: 2026-01-23 10:41:18.894338682 +0000 UTC m=+0.880315501 container died 70935c21499685672b8e368e4d9f412081240a225439ed4442c8712ab1111167 (image=quay.io/ceph/ceph@sha256:7c69e59beaeea61ca714e71cb84ff6d5e533db7f1fd84143dd9ba6649a5fd2ec, name=compassionate_solomon, ceph=True, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20250325, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.40.1, CEPH_SHA1=c92aebb279828e9c3c1f5d24613efca272649e62, org.label-schema.license=GPLv2)
Jan 23 10:41:19 compute-0 podman[295489]: 2026-01-23 10:41:19.24226143 +0000 UTC m=+1.228238259 container remove 70935c21499685672b8e368e4d9f412081240a225439ed4442c8712ab1111167 (image=quay.io/ceph/ceph@sha256:7c69e59beaeea61ca714e71cb84ff6d5e533db7f1fd84143dd9ba6649a5fd2ec, name=compassionate_solomon, CEPH_SHA1=c92aebb279828e9c3c1f5d24613efca272649e62, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, OSD_FLAVOR=default, io.buildah.version=1.40.1, org.label-schema.build-date=20250325, org.label-schema.license=GPLv2, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Jan 23 10:41:19 compute-0 sudo[295083]: pam_unix(sudo:session): session closed for user root
Jan 23 10:41:20 compute-0 sudo[296342]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Jan 23 10:41:20 compute-0 sudo[296342]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 23 10:41:20 compute-0 sudo[296342]: pam_unix(sudo:session): session closed for user root

● user.slice - User and Session Slice
     Loaded: loaded (/usr/lib/systemd/system/user.slice; static)
     Active: active since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
       Docs: man:systemd.special(7)
         IO: 458.6M read, 10.0G written
      Tasks: 40
     Memory: 3.3G (peak: 4.9G)
        CPU: 30min 34.673s
     CGroup: /user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4516 /usr/bin/python3
             │ ├─session-59.scope
             │ │ ├─289862 "sshd-session: zuul [priv]"
             │ │ ├─289865 "sshd-session: zuul@notty"
             │ │ ├─289866 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─289890 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─296659 timeout 15s turbostat --debug sleep 10
             │ │ ├─297031 timeout 300s systemctl status --all
             │ │ ├─297032 systemctl status --all
             │ │ ├─297060 timeout 300s ceph osd numa-status --format json-pretty
             │ │ ├─297061 /usr/bin/python3 -s /usr/bin/ceph osd numa-status --format json-pretty
             │ │ ├─297112 timeout 300s tuned-adm recommend
             │ │ └─297113 /usr/bin/python3 -Es /usr/sbin/tuned-adm recommend
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─11765 /usr/bin/dbus-broker-launch --scope user
             │   │   └─11778 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4304 /usr/lib/systemd/systemd --user
             │   │ └─4306 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-3184ab7f.scope
             │       └─11628 catatonit -P
             └─user-42477.slice
               ├─session-39.scope
               │ ├─99842 "sshd-session: ceph-admin [priv]"
               │ └─99848 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─90024 /usr/lib/systemd/systemd --user
                   └─90026 "(sd-pam)"

● dbus.socket - D-Bus System Message Bus Socket
     Loaded: loaded (/usr/lib/systemd/system/dbus.socket; enabled; preset: enabled)
     Active: active (running) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
   Triggers: ● dbus-broker.service
     Listen: /run/dbus/system_bus_socket (Stream)
     CGroup: /system.slice/dbus.socket

Jan 23 09:01:23 localhost systemd[1]: Listening on D-Bus System Message Bus Socket.

● dm-event.socket - Device-mapper event daemon FIFOs
     Loaded: loaded (/usr/lib/systemd/system/dm-event.socket; enabled; preset: enabled)
     Active: active (listening) since Fri 2026-01-23 09:39:04 UTC; 1h 2min ago
      Until: Fri 2026-01-23 09:39:04 UTC; 1h 2min ago
   Triggers: ● dm-event.service
       Docs: man:dmeventd(8)
     Listen: /run/dmeventd-server (FIFO)
             /run/dmeventd-client (FIFO)
     CGroup: /system.slice/dm-event.socket

Jan 23 09:39:04 compute-0 systemd[1]: Listening on Device-mapper event daemon FIFOs.

● iscsid.socket - Open-iSCSI iscsid Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsid.socket; enabled; preset: enabled)
     Active: active (running) since Fri 2026-01-23 10:08:19 UTC; 33min ago
      Until: Fri 2026-01-23 10:08:19 UTC; 33min ago
   Triggers: ● iscsid.service
       Docs: man:iscsid(8)
             man:iscsiadm(8)
     Listen: @ISCSIADM_ABSTRACT_NAMESPACE (Stream)
     CGroup: /system.slice/iscsid.socket

Jan 23 10:08:19 compute-0 systemd[1]: Listening on Open-iSCSI iscsid Socket.

○ iscsiuio.socket - Open-iSCSI iscsiuio Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● iscsiuio.service
       Docs: man:iscsiuio(8)
     Listen: @ISCSID_UIP_ABSTRACT_NAMESPACE (Stream)

○ libvirtd-admin.socket - libvirt legacy monolithic daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-admin.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)

○ libvirtd-ro.socket - libvirt legacy monolithic daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-ro.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)

○ libvirtd-tls.socket
     Loaded: masked (Reason: Unit libvirtd-tls.socket is masked.)
     Active: inactive (dead)

○ libvirtd.socket - libvirt legacy monolithic daemon socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock (Stream)

● lvm2-lvmpolld.socket - LVM2 poll daemon socket
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.socket; enabled; preset: enabled)
     Active: active (listening) since Fri 2026-01-23 09:39:05 UTC; 1h 2min ago
      Until: Fri 2026-01-23 09:39:05 UTC; 1h 2min ago
   Triggers: ● lvm2-lvmpolld.service
       Docs: man:lvmpolld(8)
     Listen: /run/lvm/lvmpolld.socket (Stream)
     CGroup: /system.slice/lvm2-lvmpolld.socket

Jan 23 09:39:05 compute-0 systemd[1]: Listening on LVM2 poll daemon socket.

● multipathd.socket - multipathd control socket
     Loaded: loaded (/usr/lib/systemd/system/multipathd.socket; enabled; preset: disabled)
     Active: active (running) since Fri 2026-01-23 10:08:46 UTC; 32min ago
      Until: Fri 2026-01-23 10:08:46 UTC; 32min ago
   Triggers: ● multipathd.service
     Listen: @/org/kernel/linux/storage/multipathd (Stream)
             /run/multipathd.socket (Stream)
     CGroup: /system.slice/multipathd.socket

Jan 23 10:08:46 compute-0 systemd[1]: Listening on multipathd control socket.

● rpcbind.socket - RPCbind Server Activation Socket
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.socket; enabled; preset: enabled)
     Active: active (running) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
   Triggers: ● rpcbind.service
     Listen: /run/rpcbind.sock (Stream)
             0.0.0.0:111 (Stream)
             0.0.0.0:111 (Datagram)
             [::]:111 (Stream)
             [::]:111 (Datagram)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 16.0K (peak: 288.0K)
        CPU: 5ms
     CGroup: /system.slice/rpcbind.socket

● sssd-kcm.socket - SSSD Kerberos Cache Manager responder socket
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.socket; enabled; preset: enabled)
     Active: active (listening) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
   Triggers: ● sssd-kcm.service
       Docs: man:sssd-kcm(8)
     Listen: /run/.heim_org.h5l.kcm-socket (Stream)
     CGroup: /system.slice/sssd-kcm.socket

Jan 23 09:01:23 localhost systemd[1]: Listening on SSSD Kerberos Cache Manager responder socket.

○ syslog.socket - Syslog Socket
     Loaded: loaded (/usr/lib/systemd/system/syslog.socket; static)
     Active: inactive (dead)
   Triggers: ● syslog.service
       Docs: man:systemd.special(7)
             https://www.freedesktop.org/wiki/Software/systemd/syslog
     Listen: /run/systemd/journal/syslog (Datagram)

● systemd-coredump.socket - Process Core Dump Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-coredump.socket; static)
     Active: active (listening) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
       Docs: man:systemd-coredump(8)
     Listen: /run/systemd/coredump (SequentialPacket)
   Accepted: 12; Connected: 0;
     CGroup: /system.slice/systemd-coredump.socket

● systemd-initctl.socket - initctl Compatibility Named Pipe
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.socket; static)
     Active: active (listening) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
   Triggers: ● systemd-initctl.service
       Docs: man:systemd-initctl.socket(8)
     Listen: /run/initctl (FIFO)
     CGroup: /system.slice/systemd-initctl.socket

● systemd-journald-dev-log.socket - Journal Socket (/dev/log)
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald-dev-log.socket; static)
     Active: active (running) since Fri 2026-01-23 09:01:20 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:20 UTC; 1h 40min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/dev-log (Datagram)
     CGroup: /system.slice/systemd-journald-dev-log.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-journald.socket - Journal Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.socket; static)
     Active: active (running) since Fri 2026-01-23 09:01:20 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:20 UTC; 1h 40min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/socket (Datagram)
             /run/systemd/journal/stdout (Stream)
     CGroup: /system.slice/systemd-journald.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-rfkill.socket - Load/Save RF Kill Switch Status /dev/rfkill Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.socket; static)
     Active: active (listening) since Fri 2026-01-23 09:01:24 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:24 UTC; 1h 40min ago
   Triggers: ● systemd-rfkill.service
       Docs: man:systemd-rfkill.socket(8)
     Listen: /dev/rfkill (Special)
     CGroup: /system.slice/systemd-rfkill.socket

Jan 23 09:01:24 np0005593293.novalocal systemd[1]: Listening on Load/Save RF Kill Switch Status /dev/rfkill Watch.

● systemd-udevd-control.socket - udev Control Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-control.socket; static)
     Active: active (running) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-control.socket(8)
             man:udev(7)
     Listen: /run/udev/control (SequentialPacket)
     CGroup: /system.slice/systemd-udevd-control.socket

● systemd-udevd-kernel.socket - udev Kernel Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-kernel.socket; static)
     Active: active (running) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-kernel.socket(8)
             man:udev(7)
     Listen: kobject-uevent 1 (Netlink)
     CGroup: /system.slice/systemd-udevd-kernel.socket

○ virtinterfaced-admin.socket - libvirt interface daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-admin-sock (Stream)

○ virtinterfaced-ro.socket - libvirt interface daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock-ro (Stream)

○ virtinterfaced.socket - libvirt interface daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock (Stream)

○ virtlockd-admin.socket - libvirt locking daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-admin-sock (Stream)

● virtlockd.socket - libvirt locking daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.socket; enabled; preset: enabled)
     Active: active (listening) since Fri 2026-01-23 10:07:17 UTC; 34min ago
      Until: Fri 2026-01-23 10:07:17 UTC; 34min ago
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-sock (Stream)
     CGroup: /system.slice/virtlockd.socket

Jan 23 10:07:17 compute-0 systemd[1]: Listening on libvirt locking daemon socket.

● virtlogd-admin.socket - libvirt logging daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd-admin.socket.d
             └─override.conf
     Active: active (running) since Fri 2026-01-23 10:07:13 UTC; 34min ago
      Until: Fri 2026-01-23 10:07:13 UTC; 34min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtlogd-admin.socket

Jan 23 10:07:13 compute-0 systemd[1]: Starting libvirt logging daemon admin socket...
Jan 23 10:07:13 compute-0 systemd[1]: Listening on libvirt logging daemon admin socket.

● virtlogd.socket - libvirt logging daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd.socket.d
             └─override.conf
     Active: active (running) since Fri 2026-01-23 10:07:13 UTC; 34min ago
      Until: Fri 2026-01-23 10:07:13 UTC; 34min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtlogd.socket

Jan 23 10:07:13 compute-0 systemd[1]: Starting libvirt logging daemon socket...
Jan 23 10:07:13 compute-0 systemd[1]: Listening on libvirt logging daemon socket.

○ virtnetworkd-admin.socket - libvirt network daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-admin-sock (Stream)

○ virtnetworkd-ro.socket - libvirt network daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock-ro (Stream)

○ virtnetworkd.socket - libvirt network daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock (Stream)

● virtnodedevd-admin.socket - libvirt nodedev daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-admin.socket.d
             └─override.conf
     Active: active (running) since Fri 2026-01-23 10:07:15 UTC; 34min ago
      Until: Fri 2026-01-23 10:07:15 UTC; 34min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtnodedevd-admin.socket

Jan 23 10:07:15 compute-0 systemd[1]: Starting libvirt nodedev daemon admin socket...
Jan 23 10:07:15 compute-0 systemd[1]: Listening on libvirt nodedev daemon admin socket.

● virtnodedevd-ro.socket - libvirt nodedev daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-ro.socket.d
             └─override.conf
     Active: active (running) since Fri 2026-01-23 10:07:15 UTC; 34min ago
      Until: Fri 2026-01-23 10:07:15 UTC; 34min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtnodedevd-ro.socket

Jan 23 10:07:15 compute-0 systemd[1]: Starting libvirt nodedev daemon read-only socket...
Jan 23 10:07:15 compute-0 systemd[1]: Listening on libvirt nodedev daemon read-only socket.

● virtnodedevd.socket - libvirt nodedev daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd.socket.d
             └─override.conf
     Active: active (running) since Fri 2026-01-23 10:07:15 UTC; 34min ago
      Until: Fri 2026-01-23 10:07:15 UTC; 34min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtnodedevd.socket

Jan 23 10:07:15 compute-0 systemd[1]: Starting libvirt nodedev daemon socket...
Jan 23 10:07:15 compute-0 systemd[1]: Listening on libvirt nodedev daemon socket.

○ virtnwfilterd-admin.socket - libvirt nwfilter daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-admin-sock (Stream)

○ virtnwfilterd-ro.socket - libvirt nwfilter daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock-ro (Stream)

○ virtnwfilterd.socket - libvirt nwfilter daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock (Stream)

● virtproxyd-admin.socket - libvirt proxy daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-admin.socket.d
             └─override.conf
     Active: active (listening) since Fri 2026-01-23 10:07:16 UTC; 34min ago
      Until: Fri 2026-01-23 10:07:16 UTC; 34min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 5ms
     CGroup: /system.slice/virtproxyd-admin.socket

Jan 23 10:07:16 compute-0 systemd[1]: Starting libvirt proxy daemon admin socket...
Jan 23 10:07:16 compute-0 systemd[1]: Listening on libvirt proxy daemon admin socket.

● virtproxyd-ro.socket - libvirt proxy daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-ro.socket.d
             └─override.conf
     Active: active (listening) since Fri 2026-01-23 10:07:16 UTC; 34min ago
      Until: Fri 2026-01-23 10:07:16 UTC; 34min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtproxyd-ro.socket

Jan 23 10:07:16 compute-0 systemd[1]: Starting libvirt proxy daemon read-only socket...
Jan 23 10:07:16 compute-0 systemd[1]: Listening on libvirt proxy daemon read-only socket.

● virtproxyd-tls.socket - libvirt proxy daemon TLS IP socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-tls.socket; enabled; preset: disabled)
     Active: active (listening) since Fri 2026-01-23 10:06:03 UTC; 35min ago
      Until: Fri 2026-01-23 10:06:03 UTC; 35min ago
   Triggers: ● virtproxyd.service
     Listen: [::]:16514 (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 8.0K (peak: 256.0K)
        CPU: 2ms
     CGroup: /system.slice/virtproxyd-tls.socket

Jan 23 10:06:03 compute-0 systemd[1]: Listening on libvirt proxy daemon TLS IP socket.

● virtproxyd.socket - libvirt proxy daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd.socket.d
             └─override.conf
     Active: active (listening) since Fri 2026-01-23 10:06:03 UTC; 35min ago
      Until: Fri 2026-01-23 10:06:03 UTC; 35min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock (Stream)
     CGroup: /system.slice/virtproxyd.socket

Jan 23 10:06:03 compute-0 systemd[1]: Listening on libvirt proxy daemon socket.

● virtqemud-admin.socket - libvirt QEMU daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-admin.socket.d
             └─override.conf
     Active: active (running) since Fri 2026-01-23 10:07:17 UTC; 34min ago
      Until: Fri 2026-01-23 10:07:17 UTC; 34min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtqemud-admin.socket

Jan 23 10:07:17 compute-0 systemd[1]: Starting libvirt QEMU daemon admin socket...
Jan 23 10:07:17 compute-0 systemd[1]: Listening on libvirt QEMU daemon admin socket.

● virtqemud-ro.socket - libvirt QEMU daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-ro.socket.d
             └─override.conf
     Active: active (running) since Fri 2026-01-23 10:07:17 UTC; 34min ago
      Until: Fri 2026-01-23 10:07:17 UTC; 34min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtqemud-ro.socket

Jan 23 10:07:17 compute-0 systemd[1]: Starting libvirt QEMU daemon read-only socket...
Jan 23 10:07:17 compute-0 systemd[1]: Listening on libvirt QEMU daemon read-only socket.

● virtqemud.socket - libvirt QEMU daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud.socket.d
             └─override.conf
     Active: active (running) since Fri 2026-01-23 10:07:17 UTC; 34min ago
      Until: Fri 2026-01-23 10:07:17 UTC; 34min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtqemud.socket

Jan 23 10:07:17 compute-0 systemd[1]: Starting libvirt QEMU daemon socket...
Jan 23 10:07:17 compute-0 systemd[1]: Listening on libvirt QEMU daemon socket.

● virtsecretd-admin.socket - libvirt secret daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-admin.socket.d
             └─override.conf
     Active: active (listening) since Fri 2026-01-23 10:07:18 UTC; 34min ago
      Until: Fri 2026-01-23 10:07:18 UTC; 34min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 652.0K)
        CPU: 5ms
     CGroup: /system.slice/virtsecretd-admin.socket

Jan 23 10:07:18 compute-0 systemd[1]: Starting libvirt secret daemon admin socket...
Jan 23 10:07:18 compute-0 systemd[1]: Listening on libvirt secret daemon admin socket.

● virtsecretd-ro.socket - libvirt secret daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-ro.socket.d
             └─override.conf
     Active: active (listening) since Fri 2026-01-23 10:07:18 UTC; 34min ago
      Until: Fri 2026-01-23 10:07:18 UTC; 34min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 5ms
     CGroup: /system.slice/virtsecretd-ro.socket

Jan 23 10:07:18 compute-0 systemd[1]: Starting libvirt secret daemon read-only socket...
Jan 23 10:07:18 compute-0 systemd[1]: Listening on libvirt secret daemon read-only socket.

● virtsecretd.socket - libvirt secret daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd.socket.d
             └─override.conf
     Active: active (listening) since Fri 2026-01-23 10:07:18 UTC; 34min ago
      Until: Fri 2026-01-23 10:07:18 UTC; 34min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtsecretd.socket

Jan 23 10:07:18 compute-0 systemd[1]: Starting libvirt secret daemon socket...
Jan 23 10:07:18 compute-0 systemd[1]: Listening on libvirt secret daemon socket.

○ virtstoraged-admin.socket - libvirt storage daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-admin-sock (Stream)

○ virtstoraged-ro.socket - libvirt storage daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock-ro (Stream)

○ virtstoraged.socket - libvirt storage daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock (Stream)

● swap.swap - /swap
     Loaded: loaded (/etc/fstab; generated)
     Active: active since Fri 2026-01-23 09:41:35 UTC; 59min ago
      Until: Fri 2026-01-23 09:41:35 UTC; 59min ago
       What: /swap
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● basic.target - Basic System
     Loaded: loaded (/usr/lib/systemd/system/basic.target; static)
     Active: active since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:systemd.special(7)

Jan 23 09:01:23 localhost systemd[1]: Reached target Basic System.

○ blockdev@dev-disk-by\x2duuid-22ac9141\x2d3960\x2d4912\x2db20e\x2d19fc8a328d40.target - Block Device Preparation for /dev/disk/by-uuid/22ac9141-3960-4912-b20e-19fc8a328d40
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ blockdev@dev-vda1.target - Block Device Preparation for /dev/vda1
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● ceph-f3005f84-239a-55b6-a948-8f1fb592b920.target - Ceph cluster f3005f84-239a-55b6-a948-8f1fb592b920
     Loaded: loaded (/etc/systemd/system/ceph-f3005f84-239a-55b6-a948-8f1fb592b920.target; enabled; preset: disabled)
     Active: active since Fri 2026-01-23 09:47:36 UTC; 53min ago
      Until: Fri 2026-01-23 09:47:36 UTC; 53min ago

Jan 23 09:47:36 compute-0 systemd[1]: Reached target Ceph cluster f3005f84-239a-55b6-a948-8f1fb592b920.

● ceph.target - All Ceph clusters and services
     Loaded: loaded (/etc/systemd/system/ceph.target; enabled; preset: disabled)
     Active: active since Fri 2026-01-23 09:47:36 UTC; 53min ago
      Until: Fri 2026-01-23 09:47:36 UTC; 53min ago

Jan 23 09:47:36 compute-0 systemd[1]: Reached target All Ceph clusters and services.

● cloud-config.target - Cloud-config availability
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.target; static)
     Active: active since Fri 2026-01-23 09:01:30 UTC; 1h 39min ago
      Until: Fri 2026-01-23 09:01:30 UTC; 1h 39min ago

Jan 23 09:01:30 np0005593293.novalocal systemd[1]: Reached target Cloud-config availability.

● cloud-init.target - Cloud-init target
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.target; enabled-runtime; preset: disabled)
     Active: active since Fri 2026-01-23 09:01:31 UTC; 1h 39min ago
      Until: Fri 2026-01-23 09:01:31 UTC; 1h 39min ago

Jan 23 09:01:31 np0005593293.novalocal systemd[1]: Reached target Cloud-init target.

○ cryptsetup-pre.target - Local Encrypted Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● cryptsetup.target - Local Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup.target; static)
     Active: active since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
       Docs: man:systemd.special(7)

● edpm_libvirt.target
     Loaded: loaded (/etc/systemd/system/edpm_libvirt.target; static)
     Active: active since Fri 2026-01-23 10:07:50 UTC; 33min ago
      Until: Fri 2026-01-23 10:07:50 UTC; 33min ago

Jan 23 10:07:50 compute-0 systemd[1]: Reached target edpm_libvirt.target.

○ emergency.target - Emergency Mode
     Loaded: loaded (/usr/lib/systemd/system/emergency.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ first-boot-complete.target - First Boot Complete
     Loaded: loaded (/usr/lib/systemd/system/first-boot-complete.target; static)
     Active: inactive (dead)
  Condition: start condition failed at Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:systemd.special(7)

Jan 23 09:01:23 localhost systemd[1]: First Boot Complete was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ getty-pre.target - Preparation for Logins
     Loaded: loaded (/usr/lib/systemd/system/getty-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

● getty.target - Login Prompts
     Loaded: loaded (/usr/lib/systemd/system/getty.target; static)
     Active: active since Fri 2026-01-23 09:01:30 UTC; 1h 39min ago
      Until: Fri 2026-01-23 09:01:30 UTC; 1h 39min ago
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

Jan 23 09:01:30 np0005593293.novalocal systemd[1]: Reached target Login Prompts.

○ graphical.target - Graphical Interface
     Loaded: loaded (/usr/lib/systemd/system/graphical.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ initrd-fs.target - Initrd File Systems
     Loaded: loaded (/usr/lib/systemd/system/initrd-fs.target; static)
     Active: inactive (dead) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
       Docs: man:systemd.special(7)

Jan 23 09:01:21 localhost systemd[1]: Reached target Initrd File Systems.

○ initrd-root-device.target - Initrd Root Device
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-device.target; static)
     Active: inactive (dead) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
       Docs: man:systemd.special(7)

Jan 23 09:01:21 localhost systemd[1]: Reached target Initrd Root Device.
Jan 23 09:01:22 localhost systemd[1]: Stopped target Initrd Root Device.

○ initrd-root-fs.target - Initrd Root File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-fs.target; static)
     Active: inactive (dead) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
       Docs: man:systemd.special(7)

Jan 23 09:01:21 localhost systemd[1]: Reached target Initrd Root File System.

○ initrd-switch-root.target - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.target; static)
     Active: inactive (dead) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago

Jan 23 09:01:22 localhost systemd[1]: Reached target Switch Root.

○ initrd-usr-fs.target - Initrd /usr File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-usr-fs.target; static)
     Active: inactive (dead) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
       Docs: man:systemd.special(7)

Jan 23 09:01:22 localhost systemd[1]: Stopped target Initrd /usr File System.
Notice: journal has been rotated since unit was started, output may be incomplete.

○ initrd.target - Initrd Default Target
     Loaded: loaded (/usr/lib/systemd/system/initrd.target; static)
     Active: inactive (dead) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
       Docs: man:systemd.special(7)

Jan 23 09:01:21 localhost systemd[1]: Reached target Initrd Default Target.
Jan 23 09:01:22 localhost systemd[1]: Stopped target Initrd Default Target.

● integritysetup.target - Local Integrity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/integritysetup.target; static)
     Active: active since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
       Docs: man:systemd.special(7)

● local-fs-pre.target - Preparation for Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs-pre.target; static)
     Active: active since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:systemd.special(7)

Jan 23 09:01:23 localhost systemd[1]: Reached target Preparation for Local File Systems.

● local-fs.target - Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs.target; static)
     Active: active since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:systemd.special(7)

Jan 23 09:01:23 localhost systemd[1]: Reached target Local File Systems.

● multi-user.target - Multi-User System
     Loaded: loaded (/usr/lib/systemd/system/multi-user.target; indirect; preset: disabled)
     Active: active since Fri 2026-01-23 09:01:30 UTC; 1h 39min ago
      Until: Fri 2026-01-23 09:01:30 UTC; 1h 39min ago
       Docs: man:systemd.special(7)

Jan 23 09:01:30 np0005593293.novalocal systemd[1]: Reached target Multi-User System.

● network-online.target - Network is Online
     Loaded: loaded (/usr/lib/systemd/system/network-online.target; static)
     Active: active since Fri 2026-01-23 09:01:30 UTC; 1h 39min ago
      Until: Fri 2026-01-23 09:01:30 UTC; 1h 39min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Jan 23 09:01:30 np0005593293.novalocal systemd[1]: Reached target Network is Online.

● network-pre.target - Preparation for Network
     Loaded: loaded (/usr/lib/systemd/system/network-pre.target; static)
     Active: active since Fri 2026-01-23 09:01:24 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:24 UTC; 1h 40min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Jan 23 09:01:24 np0005593293.novalocal systemd[1]: Reached target Preparation for Network.

● network.target - Network
     Loaded: loaded (/usr/lib/systemd/system/network.target; static)
     Active: active since Fri 2026-01-23 09:01:24 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:24 UTC; 1h 40min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Jan 23 09:01:24 np0005593293.novalocal systemd[1]: Reached target Network.

● nfs-client.target - NFS client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-client.target; enabled; preset: disabled)
     Active: active since Fri 2026-01-23 09:01:24 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:24 UTC; 1h 40min ago

Jan 23 09:01:24 np0005593293.novalocal systemd[1]: Reached target NFS client services.

○ nss-lookup.target - Host and Network Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-lookup.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● nss-user-lookup.target - User and Group Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-user-lookup.target; static)
     Active: active since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:systemd.special(7)

Jan 23 09:01:23 localhost systemd[1]: Reached target User and Group Name Lookups.

● paths.target - Path Units
     Loaded: loaded (/usr/lib/systemd/system/paths.target; static)
     Active: active since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
       Docs: man:systemd.special(7)

○ remote-cryptsetup.target - Remote Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-cryptsetup.target; disabled; preset: enabled)
     Active: inactive (dead) since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
       Docs: man:systemd.special(7)

Jan 23 09:01:21 localhost systemd[1]: Reached target Remote Encrypted Volumes.
Jan 23 09:01:22 localhost systemd[1]: Stopped target Remote Encrypted Volumes.

● remote-fs-pre.target - Preparation for Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs-pre.target; static)
     Active: active since Fri 2026-01-23 09:01:24 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:24 UTC; 1h 40min ago
       Docs: man:systemd.special(7)

Jan 23 09:01:24 np0005593293.novalocal systemd[1]: Reached target Preparation for Remote File Systems.

● remote-fs.target - Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs.target; enabled; preset: enabled)
     Active: active since Fri 2026-01-23 09:01:24 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:24 UTC; 1h 40min ago
       Docs: man:systemd.special(7)

Jan 23 09:01:24 np0005593293.novalocal systemd[1]: Reached target Remote File Systems.

○ remote-veritysetup.target - Remote Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-veritysetup.target; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ rescue.target - Rescue Mode
     Loaded: loaded (/usr/lib/systemd/system/rescue.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● rpc_pipefs.target
     Loaded: loaded (/usr/lib/systemd/system/rpc_pipefs.target; static)
     Active: active since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:22 UTC; 1h 40min ago

● rpcbind.target - RPC Port Mapper
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.target; static)
     Active: active since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
       Docs: man:systemd.special(7)

○ shutdown.target - System Shutdown
     Loaded: loaded (/usr/lib/systemd/system/shutdown.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● slices.target - Slice Units
     Loaded: loaded (/usr/lib/systemd/system/slices.target; static)
     Active: active since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
       Docs: man:systemd.special(7)

● sockets.target - Socket Units
     Loaded: loaded (/usr/lib/systemd/system/sockets.target; static)
     Active: active since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
Unit syslog.target could not be found.
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:systemd.special(7)

Jan 23 09:01:23 localhost systemd[1]: Reached target Socket Units.

● sshd-keygen.target
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen.target; static)
     Active: active since Fri 2026-01-23 10:05:45 UTC; 35min ago
      Until: Fri 2026-01-23 10:05:45 UTC; 35min ago

Jan 23 10:05:45 compute-0 systemd[1]: Reached target sshd-keygen.target.

● swap.target - Swaps
     Loaded: loaded (/usr/lib/systemd/system/swap.target; static)
     Active: active since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
       Docs: man:systemd.special(7)

● sysinit.target - System Initialization
     Loaded: loaded (/usr/lib/systemd/system/sysinit.target; static)
     Active: active since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:systemd.special(7)

Jan 23 09:01:23 localhost systemd[1]: Reached target System Initialization.

● time-set.target - System Time Set
     Loaded: loaded (/usr/lib/systemd/system/time-set.target; static)
     Active: active since Fri 2026-01-23 09:47:37 UTC; 53min ago
      Until: Fri 2026-01-23 09:47:37 UTC; 53min ago
       Docs: man:systemd.special(7)

Jan 23 09:47:37 compute-0 systemd[1]: Reached target System Time Set.

● time-sync.target - System Time Synchronized
     Loaded: loaded (/usr/lib/systemd/system/time-sync.target; static)
     Active: active since Fri 2026-01-23 09:47:37 UTC; 53min ago
      Until: Fri 2026-01-23 09:47:37 UTC; 53min ago
       Docs: man:systemd.special(7)

Jan 23 09:47:37 compute-0 systemd[1]: Reached target System Time Synchronized.

● timers.target - Timer Units
     Loaded: loaded (/usr/lib/systemd/system/timers.target; static)
     Active: active since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
       Docs: man:systemd.special(7)

Jan 23 09:01:23 localhost systemd[1]: Reached target Timer Units.

○ umount.target - Unmount All Filesystems
     Loaded: loaded (/usr/lib/systemd/system/umount.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ veritysetup-pre.target - Local Verity Protected Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/veritysetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● veritysetup.target - Local Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/veritysetup.target; static)
     Active: active since Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:22 UTC; 1h 40min ago
       Docs: man:systemd.special(7)

○ virt-guest-shutdown.target - Libvirt guests shutdown
     Loaded: loaded (/etc/systemd/system/virt-guest-shutdown.target; static)
     Active: inactive (dead)
       Docs: https://libvirt.org

● 7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d-11d4d8d4c83d38a3.timer - /usr/bin/podman healthcheck run 7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d
     Loaded: loaded (/run/systemd/transient/7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d-11d4d8d4c83d38a3.timer; transient)
  Transient: yes
     Active: active (waiting) since Fri 2026-01-23 10:02:57 UTC; 38min ago
      Until: Fri 2026-01-23 10:02:57 UTC; 38min ago
    Trigger: Fri 2026-01-23 10:41:56 UTC; 29s left
   Triggers: ● 7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d-11d4d8d4c83d38a3.service

Jan 23 10:02:57 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 7c4b1914e1e86e16566f40dac1c2043d119deee57a046ec037c84640bd0c067d.

● dnf-makecache.timer - dnf makecache --timer
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.timer; enabled; preset: enabled)
     Active: active (waiting) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
    Trigger: Fri 2026-01-23 11:03:09 UTC; 21min left
   Triggers: ● dnf-makecache.service

Jan 23 09:01:23 localhost systemd[1]: Started dnf makecache --timer.

● ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d-47444066448a439e.timer - /usr/bin/podman healthcheck run ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d
     Loaded: loaded (/run/systemd/transient/ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d-47444066448a439e.timer; transient)
  Transient: yes
     Active: active (waiting) since Fri 2026-01-23 10:01:34 UTC; 39min ago
      Until: Fri 2026-01-23 10:01:34 UTC; 39min ago
    Trigger: Fri 2026-01-23 10:41:51 UTC; 23s left
   Triggers: ● ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d-47444066448a439e.service

Jan 23 10:01:34 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run ffd3fd97eb8ccb69f06ea21df042fbfc8784045b6313bea6a684bfa168f1196d.

● logrotate.timer - Daily rotation of log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.timer; enabled; preset: enabled)
     Active: active (waiting) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
    Trigger: Sat 2026-01-24 00:00:00 UTC; 13h left
   Triggers: ● logrotate.service
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

Jan 23 09:01:23 localhost systemd[1]: Started Daily rotation of log files.

○ raid-check.timer - Weekly RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.timer; enabled; preset: enabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● raid-check.service

○ sysstat-collect.timer - Run system activity accounting tool every 10 minutes
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-collect.service

○ sysstat-summary.timer - Generate summary of yesterday's process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-summary.service

● systemd-tmpfiles-clean.timer - Daily Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.timer; static)
     Active: active (waiting) since Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
      Until: Fri 2026-01-23 09:01:23 UTC; 1h 40min ago
    Trigger: Sat 2026-01-24 09:16:18 UTC; 22h left
   Triggers: ● systemd-tmpfiles-clean.service
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)

Jan 23 09:01:23 localhost systemd[1]: Started Daily Cleanup of Temporary Directories.

● unbound-anchor.timer - daily update of the root trust anchor for DNSSEC
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.timer; enabled; preset: enabled)
     Active: active (waiting) since Fri 2026-01-23 09:42:17 UTC; 59min ago
      Until: Fri 2026-01-23 09:42:17 UTC; 59min ago
    Trigger: Sat 2026-01-24 00:00:00 UTC; 13h left
   Triggers: ● unbound-anchor.service
       Docs: man:unbound-anchor(8)

Jan 23 09:42:17 compute-0 systemd[1]: Started daily update of the root trust anchor for DNSSEC.
