● compute-0
    State: running
    Units: 455 loaded (incl. loaded aliases)
     Jobs: 0 queued
   Failed: 0 units
    Since: Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
  systemd: 252-64.el9
   CGroup: /
           ├─297147 turbostat --debug sleep 10
           ├─297154 sleep 10
           ├─init.scope
           │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
           ├─machine.slice
           │ ├─libpod-29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8.scope
           │ │ └─container
           │ │   ├─165361 dumb-init --single-child -- kolla_start
           │ │   ├─165364 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │ │   ├─165809 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │ │   ├─166028 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp_26sdv4_/privsep.sock
           │ │   ├─257524 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpq3jugzpc/privsep.sock
           │ │   └─257582 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpjv70h06s/privsep.sock
           │ ├─libpod-46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3.scope
           │ │ └─container
           │ │   ├─252674 dumb-init --single-child -- kolla_start
           │ │   ├─252676 /usr/bin/python3 /usr/bin/nova-compute
           │ │   ├─257381 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmpwhhqa15o/privsep.sock
           │ │   └─258300 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmp683yhkaj/privsep.sock
           │ └─libpod-e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36.scope
           │   └─container
           │     ├─154633 dumb-init --single-child -- kolla_start
           │     └─154636 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
           ├─system.slice
           │ ├─NetworkManager.service
           │ │ └─49024 /usr/sbin/NetworkManager --no-daemon
           │ ├─auditd.service
           │ │ ├─701 /sbin/auditd
           │ │ └─703 /usr/sbin/sedispatch
           │ ├─chronyd.service
           │ │ └─58590 /usr/sbin/chronyd -F 2
           │ ├─crond.service
           │ │ └─1009 /usr/sbin/crond -n
           │ ├─dbus-broker.service
           │ │ ├─772 /usr/bin/dbus-broker-launch --scope system --audit
           │ │ └─780 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
           │ ├─edpm_nova_compute.service
           │ │ └─252672 /usr/bin/conmon --api-version 1 -c 46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3 -u 46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3/userdata -p /run/containers/storage/overlay-containers/46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3
           │ ├─edpm_ovn_controller.service
           │ │ └─154631 /usr/bin/conmon --api-version 1 -c e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36 -u e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36/userdata -p /run/containers/storage/overlay-containers/e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36
           │ ├─edpm_ovn_metadata_agent.service
           │ │ └─165359 /usr/bin/conmon --api-version 1 -c 29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8 -u 29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8/userdata -p /run/containers/storage/overlay-containers/29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8
           │ ├─gssproxy.service
           │ │ └─879 /usr/sbin/gssproxy -D
           │ ├─irqbalance.service
           │ │ └─788 /usr/sbin/irqbalance
           │ ├─iscsid.service
           │ │ └─236646 /usr/sbin/iscsid -f
           │ ├─multipathd.service
           │ │ └─236888 /sbin/multipathd -d -s
           │ ├─ovs-vswitchd.service
           │ │ └─47331 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
           │ ├─ovsdb-server.service
           │ │ └─47250 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
           │ ├─polkit.service
           │ │ └─43515 /usr/lib/polkit-1/polkitd --no-debug
           │ ├─rpcbind.service
           │ │ └─699 /usr/bin/rpcbind -w -f
           │ ├─rsyslog.service
           │ │ └─1005 /usr/sbin/rsyslogd -n
           │ ├─sshd.service
           │ │ └─192944 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
           │ ├─system-ceph\x2dd241d473\x2d9fcb\x2d5f74\x2db163\x2df1ca4454e7f1.slice
           │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@alertmanager.compute-0.service
           │ │ │ ├─libpod-payload-63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4
           │ │ │ │ ├─104368 /run/podman-init -- /bin/alertmanager --cluster.listen-address=:9094 --web.listen-address=192.168.122.100:9093 --config.file=/etc/alertmanager/alertmanager.yml
           │ │ │ │ └─104370 /bin/alertmanager --cluster.listen-address=:9094 --web.listen-address=192.168.122.100:9093 --config.file=/etc/alertmanager/alertmanager.yml
           │ │ │ └─runtime
           │ │ │   └─104366 /usr/bin/conmon --api-version 1 -c 63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4 -u 63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4/userdata -p /run/containers/storage/overlay-containers/63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-alertmanager-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@alertmanager.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4
           │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@crash.compute-0.service
           │ │ │ ├─libpod-payload-318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2
           │ │ │ │ ├─79741 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
           │ │ │ │ └─79743 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
           │ │ │ └─runtime
           │ │ │   └─79739 /usr/bin/conmon --api-version 1 -c 318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2 -u 318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2/userdata -p /run/containers/storage/overlay-containers/318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2
           │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@grafana.compute-0.service
           │ │ │ ├─libpod-payload-207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2
           │ │ │ │ ├─104675 /run/podman-init -- /run.sh
           │ │ │ │ └─104677 grafana server --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini --packaging=docker cfg:default.log.mode=console cfg:default.paths.data=/var/lib/grafana cfg:default.paths.logs=/var/log/grafana cfg:default.paths.plugins=/var/lib/grafana/plugins cfg:default.paths.provisioning=/etc/grafana/provisioning
           │ │ │ └─runtime
           │ │ │   └─104673 /usr/bin/conmon --api-version 1 -c 207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2 -u 207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2/userdata -p /run/containers/storage/overlay-containers/207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-grafana-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@grafana.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2
           │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@haproxy.nfs.cephfs.compute-0.ooxkuo.service
           │ │ │ ├─libpod-payload-5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e
           │ │ │ │ ├─97798 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
           │ │ │ │ ├─97800 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
           │ │ │ │ └─97802 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
           │ │ │ └─runtime
           │ │ │   └─97796 /usr/bin/conmon --api-version 1 -c 5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e -u 5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e/userdata -p /run/containers/storage/overlay-containers/5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-nfs-cephfs-compute-0-ooxkuo --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@haproxy.nfs.cephfs.compute-0.ooxkuo.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e
           │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@haproxy.rgw.default.compute-0.avekxu.service
           │ │ │ ├─libpod-payload-19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190
           │ │ │ │ ├─90405 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
           │ │ │ │ ├─90407 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
           │ │ │ │ └─90409 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
           │ │ │ └─runtime
           │ │ │   └─90403 /usr/bin/conmon --api-version 1 -c 19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190 -u 19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190/userdata -p /run/containers/storage/overlay-containers/19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-rgw-default-compute-0-avekxu --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@haproxy.rgw.default.compute-0.avekxu.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190
           │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@keepalived.nfs.cephfs.compute-0.pqolko.service
           │ │ │ ├─libpod-payload-5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173
           │ │ │ │ ├─98165 /run/podman-init -- ./init.sh
           │ │ │ │ ├─98167 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
           │ │ │ │ └─98169 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
           │ │ │ └─runtime
           │ │ │   └─98163 /usr/bin/conmon --api-version 1 -c 5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173 -u 5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173/userdata -p /run/containers/storage/overlay-containers/5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-nfs-cephfs-compute-0-pqolko --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@keepalived.nfs.cephfs.compute-0.pqolko.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173
           │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@keepalived.rgw.default.compute-0.pxmjnp.service
           │ │ │ ├─libpod-payload-860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e
           │ │ │ │ ├─99765 /run/podman-init -- ./init.sh
           │ │ │ │ ├─99767 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
           │ │ │ │ └─99769 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
           │ │ │ └─runtime
           │ │ │   └─99763 /usr/bin/conmon --api-version 1 -c 860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e -u 860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e/userdata -p /run/containers/storage/overlay-containers/860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-rgw-default-compute-0-pxmjnp --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@keepalived.rgw.default.compute-0.pxmjnp.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e
           │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mds.cephfs.compute-0.clmmzw.service
           │ │ │ ├─libpod-payload-7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10
           │ │ │ │ ├─96759 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.clmmzw -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─96761 /usr/bin/ceph-mds -n mds.cephfs.compute-0.clmmzw -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─96757 /usr/bin/conmon --api-version 1 -c 7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10 -u 7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10/userdata -p /run/containers/storage/overlay-containers/7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-mds-cephfs-compute-0-clmmzw --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mds.cephfs.compute-0.clmmzw.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10
           │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mgr.compute-0.djvyfo.service
           │ │ │ ├─libpod-payload-3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc
           │ │ │ │ ├─74783 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.djvyfo -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─74785 /usr/bin/ceph-mgr -n mgr.compute-0.djvyfo -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─74781 /usr/bin/conmon --api-version 1 -c 3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc -u 3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc/userdata -p /run/containers/storage/overlay-containers/3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-mgr-compute-0-djvyfo --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mgr.compute-0.djvyfo.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc
           │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mon.compute-0.service
           │ │ │ ├─libpod-payload-79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783
           │ │ │ │ ├─74487 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ │ └─74489 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─74485 /usr/bin/conmon --api-version 1 -c 79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783 -u 79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783/userdata -p /run/containers/storage/overlay-containers/79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783
           │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@nfs.cephfs.2.0.compute-0.fdwwab.service
           │ │ │ ├─libpod-payload-47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9
           │ │ │ │ ├─270910 /run/podman-init -- /usr/bin/ganesha.nfsd -F -L STDERR -N NIV_EVENT
           │ │ │ │ └─270912 /usr/bin/ganesha.nfsd -F -L STDERR -N NIV_EVENT
           │ │ │ └─runtime
           │ │ │   └─270907 /usr/bin/conmon --api-version 1 -c 47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9 -u 47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9/userdata -p /run/containers/storage/overlay-containers/47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-nfs-cephfs-2-0-compute-0-fdwwab --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@nfs.cephfs.2.0.compute-0.fdwwab.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9
           │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@node-exporter.compute-0.service
           │ │ │ ├─libpod-payload-690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998
           │ │ │ │ ├─104074 /run/podman-init -- /bin/node_exporter --no-collector.timex --web.listen-address=:9100 --path.procfs=/host/proc --path.sysfs=/host/sys --path.rootfs=/rootfs
           │ │ │ │ └─104076 /bin/node_exporter --no-collector.timex --web.listen-address=:9100 --path.procfs=/host/proc --path.sysfs=/host/sys --path.rootfs=/rootfs
           │ │ │ └─runtime
           │ │ │   └─104072 /usr/bin/conmon --api-version 1 -c 690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998 -u 690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998/userdata -p /run/containers/storage/overlay-containers/690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-node-exporter-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@node-exporter.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998
           │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@osd.1.service
           │ │ │ ├─libpod-payload-4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763
           │ │ │ │ ├─82703 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─82705 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─82701 /usr/bin/conmon --api-version 1 -c 4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763 -u 4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763/userdata -p /run/containers/storage/overlay-containers/4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763
           │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@prometheus.compute-0.service
           │ │ │ ├─libpod-payload-214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667
           │ │ │ │ ├─100307 /run/podman-init -- /bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=15d --storage.tsdb.retention.size=0 --web.external-url=http://compute-0.ctlplane.example.com:9095 --web.listen-address=192.168.122.100:9095
           │ │ │ │ └─100309 /bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=15d --storage.tsdb.retention.size=0 --web.external-url=http://compute-0.ctlplane.example.com:9095 --web.listen-address=192.168.122.100:9095
           │ │ │ └─runtime
           │ │ │   └─100305 /usr/bin/conmon --api-version 1 -c 214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667 -u 214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667/userdata -p /run/containers/storage/overlay-containers/214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-prometheus-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@prometheus.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667
           │ │ └─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@rgw.rgw.compute-0.vltabo.service
           │ │   ├─libpod-payload-2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f
           │ │   │ ├─89250 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.vltabo -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │   │ └─89254 /usr/bin/radosgw -n client.rgw.rgw.compute-0.vltabo -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │   └─runtime
           │ │     └─89247 /usr/bin/conmon --api-version 1 -c 2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f -u 2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f/userdata -p /run/containers/storage/overlay-containers/2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-rgw-rgw-compute-0-vltabo --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@rgw.rgw.compute-0.vltabo.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f
           │ ├─system-getty.slice
           │ │ └─getty@tty1.service
           │ │   └─1010 /sbin/agetty -o "-p -- \\u" --noclear - linux
           │ ├─system-serial\x2dgetty.slice
           │ │ └─serial-getty@ttyS0.service
           │ │   └─1011 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
           │ ├─systemd-hostnamed.service
           │ │ └─293546 /usr/lib/systemd/systemd-hostnamed
           │ ├─systemd-journald.service
           │ │ └─677 /usr/lib/systemd/systemd-journald
           │ ├─systemd-logind.service
           │ │ └─793 /usr/lib/systemd/systemd-logind
           │ ├─systemd-machined.service
           │ │ └─219024 /usr/lib/systemd/systemd-machined
           │ ├─systemd-udevd.service
           │ │ └─udev
           │ │   └─729 /usr/lib/systemd/systemd-udevd
           │ ├─tuned.service
           │ │ └─113383 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
           │ ├─virtlogd.service
           │ │ └─218379 /usr/sbin/virtlogd
           │ ├─virtnodedevd.service
           │ │ └─252999 /usr/sbin/virtnodedevd --timeout 120
           │ └─virtqemud.service
           │   └─252362 /usr/sbin/virtqemud --timeout 120
           └─user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4532 /usr/bin/python3
             │ ├─session-59.scope
             │ │ ├─291112 "sshd-session: zuul [priv]"
             │ │ ├─291116 "sshd-session: zuul@notty"
             │ │ ├─291117 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─291141 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─297141 timeout 15s turbostat --debug sleep 10
             │ │ ├─297537 timeout 300s semanage node -l
             │ │ ├─297538 /usr/bin/python3 -EsI /usr/sbin/semanage node -l
             │ │ ├─297541 timeout 300s ceph osd blocked-by --format json-pretty
             │ │ ├─297542 /usr/bin/python3 -s /usr/bin/ceph osd blocked-by --format json-pretty
             │ │ ├─297545 timeout 300s systemctl status --all
             │ │ └─297547 systemctl status --all
             │ └─user@1000.service
             │   Unit boot.automount could not be found.
─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─13129 /usr/bin/dbus-broker-launch --scope user
             │   │   └─13141 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4320 /usr/lib/systemd/systemd --user
             │   │ └─4322 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-663ee478.scope
             │       └─13025 catatonit -P
             └─user-42477.slice
               ├─session-37.scope
               │ ├─100495 "sshd-session: ceph-admin [priv]"
               │ └─100520 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─93258 /usr/lib/systemd/systemd --user
                   └─93260 "(sd-pam)"

● proc-sys-fs-binfmt_misc.automount - Arbitrary Executable File Formats File System Automount Point
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.automount; static)
     Active: active (running) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
   Triggers: ● proc-sys-fs-binfmt_misc.mount
      Where: /proc/sys/fs/binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Feb 02 09:38:16 compute-0 systemd[1]: proc-sys-fs-binfmt_misc.automount: Got automount request for /proc/sys/fs/binfmt_misc, triggered by 76843 (sysctl)

● dev-cdrom.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:46 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:46 UTC; 1h 24min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ceph_vg0-ceph_lv0.device - /dev/ceph_vg0/ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:37:05 UTC; 48min ago
      Until: Mon 2026-02-02 09:37:05 UTC; 48min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2ddiskseq-1.device - /dev/disk/by-diskseq/1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2ddiskseq-3.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:46 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:46 UTC; 1h 24min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2ddiskseq-5.device - /dev/disk/by-diskseq/5
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:37:04 UTC; 48min ago
      Until: Mon 2026-02-02 09:37:04 UTC; 48min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2ddiskseq-6.device - /dev/disk/by-diskseq/6
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:37:05 UTC; 48min ago
      Until: Mon 2026-02-02 09:37:05 UTC; 48min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-ata\x2dQEMU_DVD\x2dROM_QM00001.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:46 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:46 UTC; 1h 24min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg0\x2dceph_lv0.device - /dev/disk/by-id/dm-name-ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:37:05 UTC; 48min ago
      Until: Mon 2026-02-02 09:37:05 UTC; 48min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2dDGTtgKUDNktA4Ci8COVQ242svLzQA45Alj33ZfB0baTfOd9onWKq61RI0Xy2nN5a.device - /dev/disk/by-id/dm-uuid-LVM-DGTtgKUDNktA4Ci8COVQ242svLzQA45Alj33ZfB0baTfOd9onWKq61RI0Xy2nN5a
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:37:05 UTC; 48min ago
      Until: Mon 2026-02-02 09:37:05 UTC; 48min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dhh6B3N\x2dvaba\x2dHuH3\x2d0494\x2dceqv\x2dkeS0\x2derwHyS.device - /dev/disk/by-id/lvm-pv-uuid-hh6B3N-vaba-HuH3-0494-ceqv-keS0-erwHyS
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:37:05 UTC; 48min ago
      Until: Mon 2026-02-02 09:37:05 UTC; 48min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2dlabel-config\x2d2.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:46 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:46 UTC; 1h 24min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpartuuid-714daac1\x2d01.device - /dev/disk/by-partuuid/714daac1-01
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:46 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:46 UTC; 1h 24min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:46 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:46 UTC; 1h 24min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0.device - /dev/disk/by-path/pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0.device - /dev/disk/by-path/virtio-pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/virtio-pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2duuid-2026\x2d02\x2d02\x2d09\x2d00\x2d30\x2d00.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:46 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:46 UTC; 1h 24min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2duuid-822f14ea\x2d6e7e\x2d41df\x2db0d8\x2dfbe282d9ded8.device - /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

Feb 02 09:00:42 localhost systemd[1]: Found device /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8.

● dev-dm\x2d0.device - /dev/dm-0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:37:05 UTC; 48min ago
      Until: Mon 2026-02-02 09:37:05 UTC; 48min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-loop3.device - /dev/loop3
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:37:04 UTC; 48min ago
      Until: Mon 2026-02-02 09:37:04 UTC; 48min ago
     Device: /sys/devices/virtual/block/loop3

● dev-mapper-ceph_vg0\x2dceph_lv0.device - /dev/mapper/ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:37:05 UTC; 48min ago
      Until: Mon 2026-02-02 09:37:05 UTC; 48min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-rfkill.device - /dev/rfkill
    Follows: unit currently follows state of sys-devices-virtual-misc-rfkill.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:47 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:47 UTC; 1h 24min ago
     Device: /sys/devices/virtual/misc/rfkill

● dev-sr0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:46 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:46 UTC; 1h 24min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ttyS0.device - /dev/ttyS0
    Follows: unit currently follows state of sys-devices-pnp0-00:00-tty-ttyS0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

Feb 02 09:00:45 localhost systemd[1]: Condition check resulted in /dev/ttyS0 being skipped.

● dev-ttyS1.device - /dev/ttyS1
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● dev-ttyS2.device - /dev/ttyS2
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS2.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● dev-ttyS3.device - /dev/ttyS3
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS3.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● dev-vda.device - /dev/vda
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-vda1.device - /dev/vda1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device - QEMU_DVD-ROM config-2
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:46 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:46 UTC; 1h 24min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● sys-devices-pci0000:00-0000:00:03.0-virtio1-net-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● sys-devices-pci0000:00-0000:00:07.0-virtio5-net-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:02:57 UTC; 1h 22min ago
      Until: Mon 2026-02-02 09:02:57 UTC; 1h 22min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-devices-platform-serial8250-tty-ttyS1.device - /sys/devices/platform/serial8250/tty/ttyS1
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● sys-devices-platform-serial8250-tty-ttyS2.device - /sys/devices/platform/serial8250/tty/ttyS2
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● sys-devices-platform-serial8250-tty-ttyS3.device - /sys/devices/platform/serial8250/tty/ttyS3
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● sys-devices-pnp0-00:00-tty-ttyS0.device - /sys/devices/pnp0/00:00/tty/ttyS0
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

● sys-devices-virtual-block-dm\x2d0.device - /sys/devices/virtual/block/dm-0
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:37:05 UTC; 48min ago
      Until: Mon 2026-02-02 09:37:05 UTC; 48min ago
     Device: /sys/devices/virtual/block/dm-0

● sys-devices-virtual-block-loop3.device - /sys/devices/virtual/block/loop3
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:37:04 UTC; 48min ago
      Until: Mon 2026-02-02 09:37:04 UTC; 48min ago
     Device: /sys/devices/virtual/block/loop3

● sys-devices-virtual-misc-rfkill.device - /sys/devices/virtual/misc/rfkill
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:47 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:47 UTC; 1h 24min ago
     Device: /sys/devices/virtual/misc/rfkill

● sys-devices-virtual-net-br\x2dex.device - /sys/devices/virtual/net/br-ex
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:34:04 UTC; 51min ago
      Until: Mon 2026-02-02 09:34:04 UTC; 51min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-devices-virtual-net-br\x2dint.device - /sys/devices/virtual/net/br-int
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:49:28 UTC; 35min ago
      Until: Mon 2026-02-02 09:49:28 UTC; 35min ago
     Device: /sys/devices/virtual/net/br-int

● sys-devices-virtual-net-genev_sys_6081.device - /sys/devices/virtual/net/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:49:28 UTC; 35min ago
      Until: Mon 2026-02-02 09:49:28 UTC; 35min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-devices-virtual-net-ovs\x2dsystem.device - /sys/devices/virtual/net/ovs-system
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:34:04 UTC; 51min ago
      Until: Mon 2026-02-02 09:34:04 UTC; 51min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-devices-virtual-net-vlan20.device - /sys/devices/virtual/net/vlan20
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:34:04 UTC; 51min ago
      Until: Mon 2026-02-02 09:34:04 UTC; 51min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-devices-virtual-net-vlan21.device - /sys/devices/virtual/net/vlan21
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:34:04 UTC; 51min ago
      Until: Mon 2026-02-02 09:34:04 UTC; 51min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-devices-virtual-net-vlan22.device - /sys/devices/virtual/net/vlan22
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:34:04 UTC; 51min ago
      Until: Mon 2026-02-02 09:34:04 UTC; 51min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-devices-virtual-net-vlan23.device - /sys/devices/virtual/net/vlan23
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:34:04 UTC; 51min ago
      Until: Mon 2026-02-02 09:34:04 UTC; 51min ago
     Device: /sys/devices/virtual/net/vlan23

● sys-module-configfs.device - /sys/module/configfs
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
     Device: /sys/module/configfs

● sys-module-fuse.device - /sys/module/fuse
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
Unit boot.mount could not be found.
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
     Device: /sys/module/fuse

● sys-subsystem-net-devices-br\x2dex.device - /sys/subsystem/net/devices/br-ex
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:34:04 UTC; 51min ago
      Until: Mon 2026-02-02 09:34:04 UTC; 51min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-subsystem-net-devices-br\x2dint.device - /sys/subsystem/net/devices/br-int
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:49:28 UTC; 35min ago
      Until: Mon 2026-02-02 09:49:28 UTC; 35min ago
     Device: /sys/devices/virtual/net/br-int

● sys-subsystem-net-devices-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-subsystem-net-devices-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:02:57 UTC; 1h 22min ago
      Until: Mon 2026-02-02 09:02:57 UTC; 1h 22min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-subsystem-net-devices-genev_sys_6081.device - /sys/subsystem/net/devices/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:49:28 UTC; 35min ago
      Until: Mon 2026-02-02 09:49:28 UTC; 35min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-subsystem-net-devices-ovs\x2dsystem.device - /sys/subsystem/net/devices/ovs-system
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:34:04 UTC; 51min ago
      Until: Mon 2026-02-02 09:34:04 UTC; 51min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-subsystem-net-devices-vlan20.device - /sys/subsystem/net/devices/vlan20
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:34:04 UTC; 51min ago
      Until: Mon 2026-02-02 09:34:04 UTC; 51min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-subsystem-net-devices-vlan21.device - /sys/subsystem/net/devices/vlan21
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:34:04 UTC; 51min ago
      Until: Mon 2026-02-02 09:34:04 UTC; 51min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-subsystem-net-devices-vlan22.device - /sys/subsystem/net/devices/vlan22
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:34:04 UTC; 51min ago
      Until: Mon 2026-02-02 09:34:04 UTC; 51min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-subsystem-net-devices-vlan23.device - /sys/subsystem/net/devices/vlan23
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-02 09:34:04 UTC; 51min ago
      Until: Mon 2026-02-02 09:34:04 UTC; 51min ago
     Device: /sys/devices/virtual/net/vlan23

● -.mount - Root Mount
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted)
      Where: /
       What: /dev/vda1
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages.mount - Huge Pages File System
     Loaded: loaded (/usr/lib/systemd/system/dev-hugepages.mount; static)
     Active: active (mounted) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Where: /dev/hugepages
       What: hugetlbfs
       Docs: https://docs.kernel.org/admin-guide/mm/hugetlbpage.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 52.0K (peak: 556.0K)
        CPU: 6ms
     CGroup: /dev-hugepages.mount

● dev-hugepages1G.mount - /dev/hugepages1G
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Mon 2026-02-02 09:36:09 UTC; 48min ago
      Until: Mon 2026-02-02 09:36:09 UTC; 48min ago
      Where: /dev/hugepages1G
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

Unit home.mount could not be found.
● dev-hugepages2M.mount - /dev/hugepages2M
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Mon 2026-02-02 09:36:09 UTC; 48min ago
      Until: Mon 2026-02-02 09:36:09 UTC; 48min ago
      Where: /dev/hugepages2M
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-mqueue.mount - POSIX Message Queue File System
     Loaded: loaded (/usr/lib/systemd/system/dev-mqueue.mount; static)
     Active: active (mounted) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Where: /dev/mqueue
       What: mqueue
       Docs: man:mq_overview(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 532.0K)
        CPU: 4ms
     CGroup: /dev-mqueue.mount

○ proc-fs-nfsd.mount - NFSD configuration filesystem
     Loaded: loaded (/usr/lib/systemd/system/proc-fs-nfsd.mount; static)
     Active: inactive (dead)
      Where: /proc/fs/nfsd
       What: nfsd

● proc-sys-fs-binfmt_misc.mount - Arbitrary Executable File Formats File System
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.mount; disabled; preset: disabled)
     Active: active (mounted) since Mon 2026-02-02 09:38:16 UTC; 46min ago
      Until: Mon 2026-02-02 09:38:16 UTC; 46min ago
TriggeredBy: ● proc-sys-fs-binfmt_misc.automount
      Where: /proc/sys/fs/binfmt_misc
       What: binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 8.0K (peak: 540.0K)
        CPU: 7ms
     CGroup: /proc-sys-fs-binfmt_misc.mount

Feb 02 09:38:16 compute-0 systemd[1]: Mounting Arbitrary Executable File Formats File System...
Feb 02 09:38:16 compute-0 systemd[1]: Mounted Arbitrary Executable File Formats File System.

● run-credentials-systemd\x2dsysctl.service.mount - /run/credentials/systemd-sysctl.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:33:01 UTC; 52min ago
      Until: Mon 2026-02-02 09:33:01 UTC; 52min ago
      Where: /run/credentials/systemd-sysctl.service
       What: none

● run-credentials-systemd\x2dsysusers.service.mount - /run/credentials/systemd-sysusers.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Where: /run/credentials/systemd-sysusers.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup.service.mount - /run/credentials/systemd-tmpfiles-setup.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Where: /run/credentials/systemd-tmpfiles-setup.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup\x2ddev.service.mount - /run/credentials/systemd-tmpfiles-setup-dev.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Where: /run/credentials/systemd-tmpfiles-setup-dev.service
       What: none

● run-netns.mount - /run/netns
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:35:37 UTC; 49min ago
      Until: Mon 2026-02-02 09:35:37 UTC; 49min ago
      Where: /run/netns
       What: tmpfs

● run-user-1000.mount - /run/user/1000
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:01:14 UTC; 1h 23min ago
      Until: Mon 2026-02-02 09:01:14 UTC; 1h 23min ago
      Where: /run/user/1000
       What: tmpfs

● run-user-42477.mount - /run/user/42477
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:40:48 UTC; 44min ago
Unit sysroot.mount could not be found.
      Until: Mon 2026-02-02 09:40:48 UTC; 44min ago
      Where: /run/user/42477
       What: tmpfs

● sys-fs-fuse-connections.mount - FUSE Control File System
     Loaded: loaded (/usr/lib/systemd/system/sys-fs-fuse-connections.mount; static)
     Active: active (mounted) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Where: /sys/fs/fuse/connections
       What: fusectl
       Docs: https://docs.kernel.org/filesystems/fuse.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 3ms
     CGroup: /sys-fs-fuse-connections.mount

Feb 02 09:00:45 localhost systemd[1]: Mounting FUSE Control File System...
Feb 02 09:00:45 localhost systemd[1]: Mounted FUSE Control File System.

● sys-kernel-config.mount - Kernel Configuration File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
      Where: /sys/kernel/config
       What: configfs
       Docs: https://docs.kernel.org/filesystems/configfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● sys-kernel-debug-tracing.mount - /sys/kernel/debug/tracing
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 10:14:17 UTC; 10min ago
      Until: Mon 2026-02-02 10:14:17 UTC; 10min ago
      Where: /sys/kernel/debug/tracing
       What: tracefs

● sys-kernel-debug.mount - Kernel Debug File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-debug.mount; static)
     Active: active (mounted) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Where: /sys/kernel/debug
       What: debugfs
       Docs: https://docs.kernel.org/filesystems/debugfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 4ms
     CGroup: /sys-kernel-debug.mount

● sys-kernel-tracing.mount - Kernel Trace File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-tracing.mount; static)
     Active: active (mounted) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Where: /sys/kernel/tracing
       What: tracefs
       Docs: https://docs.kernel.org/trace/ftrace.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 2ms
     CGroup: /sys-kernel-tracing.mount

○ tmp.mount - Temporary Directory /tmp
     Loaded: loaded (/usr/lib/systemd/system/tmp.mount; disabled; preset: disabled)
     Active: inactive (dead)
      Where: /tmp
       What: tmpfs
       Docs: https://systemd.io/TEMPORARY_DIRECTORIES
             man:file-hierarchy(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● var-lib-containers-storage-overlay-09e8e6085b4a2eecf6ab7232b6a1785b68e895e99877332b0150de6baf16110b-merged.mount - /var/lib/containers/storage/overlay/09e8e6085b4a2eecf6ab7232b6a1785b68e895e99877332b0150de6baf16110b/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:38:21 UTC; 46min ago
      Until: Mon 2026-02-02 09:38:21 UTC; 46min ago
      Where: /var/lib/containers/storage/overlay/09e8e6085b4a2eecf6ab7232b6a1785b68e895e99877332b0150de6baf16110b/merged
       What: overlay

● var-lib-containers-storage-overlay-1bdd0a853112b3ec875d866cff438e220b7eda68ea9ce4532fce7ea0b4493419-merged.mount - /var/lib/containers/storage/overlay/1bdd0a853112b3ec875d866cff438e220b7eda68ea9ce4532fce7ea0b4493419/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:40:22 UTC; 44min ago
      Until: Mon 2026-02-02 09:40:22 UTC; 44min ago
      Where: /var/lib/containers/storage/overlay/1bdd0a853112b3ec875d866cff438e220b7eda68ea9ce4532fce7ea0b4493419/merged
       What: overlay

● var-lib-containers-storage-overlay-1f9e19a8220a0bc02e8b43787fec89d8a49b4998fecb8445945875c198ac4633-merged.mount - /var/lib/containers/storage/overlay/1f9e19a8220a0bc02e8b43787fec89d8a49b4998fecb8445945875c198ac4633/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:42:36 UTC; 42min ago
      Until: Mon 2026-02-02 09:42:36 UTC; 42min ago
      Where: /var/lib/containers/storage/overlay/1f9e19a8220a0bc02e8b43787fec89d8a49b4998fecb8445945875c198ac4633/merged
       What: overlay

● var-lib-containers-storage-overlay-21df4144edb6a271217780204dedf7e33883c6f5dc21ec681521a0ea27488e53-merged.mount - /var/lib/containers/storage/overlay/21df4144edb6a271217780204dedf7e33883c6f5dc21ec681521a0ea27488e53/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:41:06 UTC; 44min ago
      Until: Mon 2026-02-02 09:41:06 UTC; 44min ago
      Where: /var/lib/containers/storage/overlay/21df4144edb6a271217780204dedf7e33883c6f5dc21ec681521a0ea27488e53/merged
       What: overlay

● var-lib-containers-storage-overlay-2ccbbff02638865fc0d6f0d473aea6c5e5e0b4b6746c9741adf356a029a3ea69-merged.mount - /var/lib/containers/storage/overlay/2ccbbff02638865fc0d6f0d473aea6c5e5e0b4b6746c9741adf356a029a3ea69/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:57:58 UTC; 27min ago
      Until: Mon 2026-02-02 09:57:58 UTC; 27min ago
      Where: /var/lib/containers/storage/overlay/2ccbbff02638865fc0d6f0d473aea6c5e5e0b4b6746c9741adf356a029a3ea69/merged
       What: overlay

● var-lib-containers-storage-overlay-3917ba9112d8423396aa79ef74dbbfbaff77004804458c96c92321e41f0dd94a-merged.mount - /var/lib/containers/storage/overlay/3917ba9112d8423396aa79ef74dbbfbaff77004804458c96c92321e41f0dd94a/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 10:11:55 UTC; 13min ago
      Until: Mon 2026-02-02 10:11:55 UTC; 13min ago
      Where: /var/lib/containers/storage/overlay/3917ba9112d8423396aa79ef74dbbfbaff77004804458c96c92321e41f0dd94a/merged
       What: overlay

● var-lib-containers-storage-overlay-5faed3b7ced04682e9a0e0c1398f6c3e98cb49d2549bdf345236866e797fcdf7-merged.mount - /var/lib/containers/storage/overlay/5faed3b7ced04682e9a0e0c1398f6c3e98cb49d2549bdf345236866e797fcdf7/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:41:33 UTC; 43min ago
      Until: Mon 2026-02-02 09:41:33 UTC; 43min ago
      Where: /var/lib/containers/storage/overlay/5faed3b7ced04682e9a0e0c1398f6c3e98cb49d2549bdf345236866e797fcdf7/merged
       What: overlay

● var-lib-containers-storage-overlay-79ccf887ef036be5e079172275f086adbd2354f062665e49d2418d03a8ee4285-merged.mount - /var/lib/containers/storage/overlay/79ccf887ef036be5e079172275f086adbd2354f062665e49d2418d03a8ee4285/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:49:28 UTC; 35min ago
      Until: Mon 2026-02-02 09:49:28 UTC; 35min ago
      Where: /var/lib/containers/storage/overlay/79ccf887ef036be5e079172275f086adbd2354f062665e49d2418d03a8ee4285/merged
       What: overlay

● var-lib-containers-storage-overlay-8f0364347fd3778f4fdbef6fb687e0de8642f7f1efe2433056c217f84d0ede74-merged.mount - /var/lib/containers/storage/overlay/8f0364347fd3778f4fdbef6fb687e0de8642f7f1efe2433056c217f84d0ede74/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:42:35 UTC; 42min ago
      Until: Mon 2026-02-02 09:42:35 UTC; 42min ago
      Where: /var/lib/containers/storage/overlay/8f0364347fd3778f4fdbef6fb687e0de8642f7f1efe2433056c217f84d0ede74/merged
       What: overlay

● var-lib-containers-storage-overlay-9b5a43e61efdbc66b7eba7ed51e4c81ce2a960bf771ac1f3ef2d331cf7e81a14-merged.mount - /var/lib/containers/storage/overlay/9b5a43e61efdbc66b7eba7ed51e4c81ce2a960bf771ac1f3ef2d331cf7e81a14/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:42:38 UTC; 42min ago
      Until: Mon 2026-02-02 09:42:38 UTC; 42min ago
      Where: /var/lib/containers/storage/overlay/9b5a43e61efdbc66b7eba7ed51e4c81ce2a960bf771ac1f3ef2d331cf7e81a14/merged
       What: overlay

● var-lib-containers-storage-overlay-9c95a6d4b51c4ce08cde680f8c6be7628c5fd815f25ceff02daecc48343cc8d1-merged.mount - /var/lib/containers/storage/overlay/9c95a6d4b51c4ce08cde680f8c6be7628c5fd815f25ceff02daecc48343cc8d1/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:39:07 UTC; 45min ago
      Until: Mon 2026-02-02 09:39:07 UTC; 45min ago
      Where: /var/lib/containers/storage/overlay/9c95a6d4b51c4ce08cde680f8c6be7628c5fd815f25ceff02daecc48343cc8d1/merged
       What: overlay

● var-lib-containers-storage-overlay-a577b7095a7f5d4d2eb851e1ce156d49c6bfa8a218830d94508f667ed7c5b7fa-merged.mount - /var/lib/containers/storage/overlay/a577b7095a7f5d4d2eb851e1ce156d49c6bfa8a218830d94508f667ed7c5b7fa/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:42:09 UTC; 42min ago
      Until: Mon 2026-02-02 09:42:09 UTC; 42min ago
      Where: /var/lib/containers/storage/overlay/a577b7095a7f5d4d2eb851e1ce156d49c6bfa8a218830d94508f667ed7c5b7fa/merged
       What: overlay

● var-lib-containers-storage-overlay-c379911b585540d7d35830547936e5d60aedcf7c36282647d202eb626cf86d15-merged.mount - /var/lib/containers/storage/overlay/c379911b585540d7d35830547936e5d60aedcf7c36282647d202eb626cf86d15/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:41:22 UTC; 43min ago
      Until: Mon 2026-02-02 09:41:22 UTC; 43min ago
      Where: /var/lib/containers/storage/overlay/c379911b585540d7d35830547936e5d60aedcf7c36282647d202eb626cf86d15/merged
       What: overlay

● var-lib-containers-storage-overlay-cf557a00cfcc553ab122155163a30ef37fd88f27c87764e83297c93374e6f2f2-merged.mount - /var/lib/containers/storage/overlay/cf557a00cfcc553ab122155163a30ef37fd88f27c87764e83297c93374e6f2f2/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:37:46 UTC; 47min ago
      Until: Mon 2026-02-02 09:37:46 UTC; 47min ago
      Where: /var/lib/containers/storage/overlay/cf557a00cfcc553ab122155163a30ef37fd88f27c87764e83297c93374e6f2f2/merged
       What: overlay

● var-lib-containers-storage-overlay-e2031b9cceda1de38fb4d7d67acd9ce0d3f4db6123254c89b857c25e37187fdd-merged.mount - /var/lib/containers/storage/overlay/e2031b9cceda1de38fb4d7d67acd9ce0d3f4db6123254c89b857c25e37187fdd/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:42:03 UTC; 43min ago
      Until: Mon 2026-02-02 09:42:03 UTC; 43min ago
      Where: /var/lib/containers/storage/overlay/e2031b9cceda1de38fb4d7d67acd9ce0d3f4db6123254c89b857c25e37187fdd/merged
       What: overlay

● var-lib-containers-storage-overlay-e2036ee02252aa9c2636df8dbfaa1c298b21cf03906444b82293233fc35f7080-merged.mount - /var/lib/containers/storage/overlay/e2036ee02252aa9c2636df8dbfaa1c298b21cf03906444b82293233fc35f7080/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:40:17 UTC; 44min ago
      Until: Mon 2026-02-02 09:40:17 UTC; 44min ago
      Where: /var/lib/containers/storage/overlay/e2036ee02252aa9c2636df8dbfaa1c298b21cf03906444b82293233fc35f7080/merged
       What: overlay

● var-lib-containers-storage-overlay-f2c9cd16f6499563d8331fc3fabbb541653c7493462c4fc51518f4d6dde3fcbc-merged.mount - /var/lib/containers/storage/overlay/f2c9cd16f6499563d8331fc3fabbb541653c7493462c4fc51518f4d6dde3fcbc/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:50:43 UTC; 34min ago
      Until: Mon 2026-02-02 09:50:43 UTC; 34min ago
      Where: /var/lib/containers/storage/overlay/f2c9cd16f6499563d8331fc3fabbb541653c7493462c4fc51518f4d6dde3fcbc/merged
       What: overlay

● var-lib-containers-storage-overlay-f2f37ceb4f74980d688bddcb1c28aa48f1c53d269dd02fed6509615520c371f9-merged.mount - /var/lib/containers/storage/overlay/f2f37ceb4f74980d688bddcb1c28aa48f1c53d269dd02fed6509615520c371f9/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:37:47 UTC; 47min ago
      Until: Mon 2026-02-02 09:37:47 UTC; 47min ago
      Where: /var/lib/containers/storage/overlay/f2f37ceb4f74980d688bddcb1c28aa48f1c53d269dd02fed6509615520c371f9/merged
       What: overlay

● var-lib-containers-storage-overlay.mount - /var/lib/containers/storage/overlay
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:37:45 UTC; 47min ago
      Until: Mon 2026-02-02 09:37:45 UTC; 47min ago
      Where: /var/lib/containers/storage/overlay
       What: /dev/vda1

● var-lib-containers-storage-overlay\x2dcontainers-29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:50:43 UTC; 34min ago
      Until: Mon 2026-02-02 09:50:43 UTC; 34min ago
      Where: /var/lib/containers/storage/overlay-containers/29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:57:58 UTC; 27min ago
      Until: Mon 2026-02-02 09:57:58 UTC; 27min ago
      Where: /var/lib/containers/storage/overlay-containers/46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-02 09:49:28 UTC; 35min ago
      Until: Mon 2026-02-02 09:49:28 UTC; 35min ago
      Where: /var/lib/containers/storage/overlay-containers/e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36/userdata/shm
       What: shm

○ var-lib-machines.mount - Virtual Machine and Container Storage (Compatibility)
     Loaded: loaded (/usr/lib/systemd/system/var-lib-machines.mount; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 09:54:31 UTC; 30min ago
      Where: /var/lib/machines
       What: /var/lib/machines.raw

Feb 02 09:54:31 compute-0 systemd[1]: Virtual Machine and Container Storage (Compatibility) was skipped because of an unmet condition check (ConditionPathExists=/var/lib/machines.raw).

● var-lib-nfs-rpc_pipefs.mount - RPC Pipe File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
      Where: /var/lib/nfs/rpc_pipefs
       What: rpc_pipefs

● systemd-ask-password-console.path - Dispatch Password Requests to Console Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.path; static)
     Active: active (waiting) since Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
   Triggers: ● systemd-ask-password-console.service
       Docs: man:systemd-ask-password-console.path(8)

● systemd-ask-password-wall.path - Forward Password Requests to Wall Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.path; static)
     Active: active (waiting) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
   Triggers: ● systemd-ask-password-wall.service
       Docs: man:systemd-ask-password-wall.path(8)

● init.scope - System and Service Manager
     Loaded: loaded
  Transient: yes
     Active: active (running) since Mon 2026-02-02 09:00:41 UTC; 1h 24min ago
       Docs: man:systemd(1)
         IO: 756.0K read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 49.9M (peak: 67.3M)
        CPU: 1min 9.651s
     CGroup: /init.scope
             └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31

Feb 02 10:24:12 compute-0 systemd[1]: libpod-conmon-848dc93cda9557f239cd4c71f3a2a0105c0319e4ba7e14dc1ba3d399ae689142.scope: Deactivated successfully.
Feb 02 10:24:12 compute-0 systemd[1]: Started libpod-conmon-2a81d223795487067ed66f2d6b051fab157fee0d4360f4f93536cc39825dbbeb.scope.
Feb 02 10:24:12 compute-0 systemd[1]: Started libcrun container.
Feb 02 10:24:13 compute-0 systemd[1]: libpod-2a81d223795487067ed66f2d6b051fab157fee0d4360f4f93536cc39825dbbeb.scope: Deactivated successfully.
Feb 02 10:24:13 compute-0 systemd[1]: libpod-2a81d223795487067ed66f2d6b051fab157fee0d4360f4f93536cc39825dbbeb.scope: Consumed 1.138s CPU time.
Feb 02 10:24:13 compute-0 systemd[1]: var-lib-containers-storage-overlay-96fc210fd1a7d459dcdfa35fe58c22b8659262809c7e28b8d8a865e13479f766-merged.mount: Deactivated successfully.
Feb 02 10:24:13 compute-0 systemd[1]: libpod-conmon-2a81d223795487067ed66f2d6b051fab157fee0d4360f4f93536cc39825dbbeb.scope: Deactivated successfully.
Feb 02 10:24:27 compute-0 systemd[1]: Started Session 59 of User zuul.
Feb 02 10:24:47 compute-0 systemd[1]: Starting Hostname Service...
Feb 02 10:24:47 compute-0 systemd[1]: Started Hostname Service.

● libpod-29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8.scope.d
             └─dep.conf
     Active: active (running) since Mon 2026-02-02 09:50:43 UTC; 34min ago
         IO: 14.6M read, 3.7M written
      Tasks: 10 (limit: 4096)
     Memory: 429.0M (peak: 476.2M)
        CPU: 18.292s
     CGroup: /machine.slice/libpod-29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8.scope
             └─container
               ├─165361 dumb-init --single-child -- kolla_start
               ├─165364 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─165809 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─166028 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp_26sdv4_/privsep.sock
               ├─257524 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpq3jugzpc/privsep.sock
               └─257582 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpjv70h06s/privsep.sock

Feb 02 10:08:51 compute-0 podman[266799]: 2026-02-02 10:08:51.281741708 +0000 UTC m=+0.077370899 container died 06d11ed54b4b61d6bf104d4ff4f5ea6a1eece6cae636763afaad86da4b7f9416 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:7c305a77ab65247f0dc2ea1616c427b173cb95f37bb37e34c631d9615a73d2cc, name=neutron-haproxy-ovnmeta-e125f54e-7556-49c5-8356-e7390df43c53, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260127, org.label-schema.vendor=CentOS, tcib_managed=true)
Feb 02 10:08:51 compute-0 podman[266799]: 2026-02-02 10:08:51.338628914 +0000 UTC m=+0.134258145 container cleanup 06d11ed54b4b61d6bf104d4ff4f5ea6a1eece6cae636763afaad86da4b7f9416 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:7c305a77ab65247f0dc2ea1616c427b173cb95f37bb37e34c631d9615a73d2cc, name=neutron-haproxy-ovnmeta-e125f54e-7556-49c5-8356-e7390df43c53, io.buildah.version=1.41.3, org.label-schema.build-date=20260127, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4)
Feb 02 10:08:51 compute-0 podman[266858]: 2026-02-02 10:08:51.418921699 +0000 UTC m=+0.052326786 container remove 06d11ed54b4b61d6bf104d4ff4f5ea6a1eece6cae636763afaad86da4b7f9416 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:7c305a77ab65247f0dc2ea1616c427b173cb95f37bb37e34c631d9615a73d2cc, name=neutron-haproxy-ovnmeta-e125f54e-7556-49c5-8356-e7390df43c53, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, io.buildah.version=1.41.3, org.label-schema.build-date=20260127, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true)
Feb 02 10:12:43 compute-0 podman[272123]: 2026-02-02 10:12:43.603491606 +0000 UTC m=+0.048082272 container create 94ab34e156d899c5dccf08cc1987499e0916f0581aa92549dd5e0fa5e98eb192 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:7c305a77ab65247f0dc2ea1616c427b173cb95f37bb37e34c631d9615a73d2cc, name=neutron-haproxy-ovnmeta-31e2c386-2e8c-4f03-82cf-3176ce6f5a71, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.build-date=20260127, org.label-schema.license=GPLv2, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4)
Feb 02 10:12:43 compute-0 podman[272123]: 2026-02-02 10:12:43.57640663 +0000 UTC m=+0.020997356 image pull 19964fda6b912d3d57e21b0bcc221725d936e513025030cb508474fe04b06af8 quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:7c305a77ab65247f0dc2ea1616c427b173cb95f37bb37e34c631d9615a73d2cc
Feb 02 10:12:43 compute-0 podman[272123]: 2026-02-02 10:12:43.688227277 +0000 UTC m=+0.132817973 container init 94ab34e156d899c5dccf08cc1987499e0916f0581aa92549dd5e0fa5e98eb192 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:7c305a77ab65247f0dc2ea1616c427b173cb95f37bb37e34c631d9615a73d2cc, name=neutron-haproxy-ovnmeta-31e2c386-2e8c-4f03-82cf-3176ce6f5a71, org.label-schema.vendor=CentOS, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, tcib_managed=true, org.label-schema.build-date=20260127, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image)
Feb 02 10:12:43 compute-0 podman[272123]: 2026-02-02 10:12:43.6936316 +0000 UTC m=+0.138222296 container start 94ab34e156d899c5dccf08cc1987499e0916f0581aa92549dd5e0fa5e98eb192 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:7c305a77ab65247f0dc2ea1616c427b173cb95f37bb37e34c631d9615a73d2cc, name=neutron-haproxy-ovnmeta-31e2c386-2e8c-4f03-82cf-3176ce6f5a71, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, org.label-schema.build-date=20260127, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true)
Feb 02 10:13:05 compute-0 podman[272803]: 2026-02-02 10:13:05.329292357 +0000 UTC m=+0.059625908 container died 94ab34e156d899c5dccf08cc1987499e0916f0581aa92549dd5e0fa5e98eb192 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:7c305a77ab65247f0dc2ea1616c427b173cb95f37bb37e34c631d9615a73d2cc, name=neutron-haproxy-ovnmeta-31e2c386-2e8c-4f03-82cf-3176ce6f5a71, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260127, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.license=GPLv2)
Feb 02 10:13:05 compute-0 podman[272803]: 2026-02-02 10:13:05.377787139 +0000 UTC m=+0.108120690 container cleanup 94ab34e156d899c5dccf08cc1987499e0916f0581aa92549dd5e0fa5e98eb192 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:7c305a77ab65247f0dc2ea1616c427b173cb95f37bb37e34c631d9615a73d2cc, name=neutron-haproxy-ovnmeta-31e2c386-2e8c-4f03-82cf-3176ce6f5a71, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.build-date=20260127, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2)
Feb 02 10:13:05 compute-0 podman[272883]: 2026-02-02 10:13:05.449527577 +0000 UTC m=+0.049641604 container remove 94ab34e156d899c5dccf08cc1987499e0916f0581aa92549dd5e0fa5e98eb192 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:7c305a77ab65247f0dc2ea1616c427b173cb95f37bb37e34c631d9615a73d2cc, name=neutron-haproxy-ovnmeta-31e2c386-2e8c-4f03-82cf-3176ce6f5a71, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.build-date=20260127, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, org.label-schema.license=GPLv2)

● libpod-46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 09:57:59 UTC; 27min ago
         IO: 34.4M read, 41.7M written
      Tasks: 27 (limit: 4096)
     Memory: 366.6M (peak: 455.7M)
        CPU: 59.592s
     CGroup: /machine.slice/libpod-46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3.scope
             └─container
               ├─252674 dumb-init --single-child -- kolla_start
               ├─252676 /usr/bin/python3 /usr/bin/nova-compute
               ├─257381 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmpwhhqa15o/privsep.sock
               └─258300 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmp683yhkaj/privsep.sock

Feb 02 09:57:59 compute-0 systemd[1]: Started libcrun container.

● libpod-e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36.scope.d
             └─dep.conf
     Active: active (running) since Mon 2026-02-02 09:49:28 UTC; 35min ago
         IO: 5.0M read, 4.0K written
      Tasks: 6 (limit: 4096)
     Memory: 14.4M (peak: 17.4M)
        CPU: 4.110s
     CGroup: /machine.slice/libpod-e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36.scope
             └─container
               ├─154633 dumb-init --single-child -- kolla_start
               └─154636 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt

Feb 02 09:49:28 compute-0 systemd[1]: Started libcrun container.

● session-1.scope - Session 1 of User zuul
     Loaded: loaded (/run/systemd/transient/session-1.scope; transient)
  Transient: yes
     Active: active (abandoned) since Mon 2026-02-02 09:01:14 UTC; 1h 23min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 17.1M (peak: 39.1M)
        CPU: 1min 13.778s
     CGroup: /user.slice/user-1000.slice/session-1.scope
             └─4532 /usr/bin/python3

Feb 02 09:03:08 np0005604790.novalocal python3[7145]: ansible-ansible.legacy.copy Invoked with src=/home/zuul/.ansible/tmp/ansible-tmp-1770022988.1670833-104-194039166377788/source dest=/etc/NetworkManager/system-connections/ci-private-network.nmconnection mode=0600 owner=root group=root follow=False _original_basename=bootstrap-ci-network-nm-connection.nmconnection.j2 checksum=5f648ca94637025cdc122ee5c24b92611ec4e7e4 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Feb 02 09:03:08 np0005604790.novalocal sudo[7143]: pam_unix(sudo:session): session closed for user root
Feb 02 09:03:09 np0005604790.novalocal sudo[7193]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-imujdhykssptskzlucvtthovqbtvomre ; OS_CLOUD=vexxhost /usr/bin/python3'
Feb 02 09:03:09 np0005604790.novalocal sudo[7193]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Feb 02 09:03:09 np0005604790.novalocal python3[7195]: ansible-ansible.builtin.systemd Invoked with name=NetworkManager state=restarted daemon_reload=False daemon_reexec=False scope=system no_block=False enabled=None force=None masked=None
Feb 02 09:03:09 np0005604790.novalocal sudo[7193]: pam_unix(sudo:session): session closed for user root
Feb 02 09:03:10 np0005604790.novalocal python3[7279]: ansible-ansible.legacy.command Invoked with _raw_params=ip route zuul_log_id=fa163ec2-ffbe-26e4-4fa2-0000000000bd-0-controller zuul_ansible_split_streams=False _uses_shell=False warn=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Feb 02 09:04:10 np0005604790.novalocal sshd-session[4329]: Received disconnect from 38.102.83.114 port 42886:11: disconnected by user
Feb 02 09:04:10 np0005604790.novalocal sshd-session[4329]: Disconnected from user zuul 38.102.83.114 port 42886
Feb 02 09:04:10 np0005604790.novalocal sshd-session[4316]: pam_unix(sshd:session): session closed for user zuul

● session-37.scope - Session 37 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-37.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 09:42:17 UTC; 42min ago
         IO: 28.0K read, 137.2M written
      Tasks: 2
     Memory: 3.7M (peak: 54.0M)
        CPU: 3min 22.323s
     CGroup: /user.slice/user-42477.slice/session-37.scope
             ├─100495 "sshd-session: ceph-admin [priv]"
             └─100520 "sshd-session: ceph-admin@notty"

Feb 02 10:24:13 compute-0 sudo[290977]: pam_unix(sudo:session): session closed for user root
Feb 02 10:24:23 compute-0 sudo[291083]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Feb 02 10:24:23 compute-0 sudo[291083]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 10:24:23 compute-0 sudo[291083]: pam_unix(sudo:session): session closed for user root
Feb 02 10:24:43 compute-0 sudo[293064]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Feb 02 10:24:43 compute-0 sudo[293064]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 10:24:43 compute-0 sudo[293064]: pam_unix(sudo:session): session closed for user root
Feb 02 10:25:03 compute-0 sudo[297283]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Feb 02 10:25:03 compute-0 sudo[297283]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 10:25:03 compute-0 sudo[297283]: pam_unix(sudo:session): session closed for user root

● session-59.scope - Session 59 of User zuul
     Loaded: loaded (/run/systemd/transient/session-59.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-02 10:24:27 UTC; 39s ago
         IO: 6.0M read, 63.4M written
      Tasks: 31
     Memory: 463.1M (peak: 514.2M)
        CPU: 1min 55.181s
     CGroup: /user.slice/user-1000.slice/session-59.scope
             ├─291112 "sshd-session: zuul [priv]"
             ├─291116 "sshd-session: zuul@notty"
             ├─291117 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             ├─291141 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             ├─297141 timeout 15s turbostat --debug sleep 10
             ├─297541 timeout 300s ceph osd blocked-by --format json-pretty
             ├─297542 /usr/bin/python3 -s /usr/bin/ceph osd blocked-by --format json-pretty
             ├─297545 timeout 300s systemctl status --all
             ├─297547 systemctl status --all
             ├─297566 timeout 300s semanage interface -l
             └─297567 /usr/bin/python3 -EsI /usr/sbin/semanage interface -l

Feb 02 10:24:27 compute-0 systemd[1]: Started Session 59 of User zuul.
Feb 02 10:24:27 compute-0 sudo[291117]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Feb 02 10:24:27 compute-0 sudo[291117]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Feb 02 10:24:43 compute-0 crontab[293123]: (root) LIST (root)
Feb 02 10:24:59 compute-0 ovs-appctl[295930]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

○ 29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8-70356c1a3b924523.service - /usr/bin/podman healthcheck run 29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8
     Loaded: loaded (/run/systemd/transient/29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8-70356c1a3b924523.service; transient)
  Transient: yes
     Active: inactive (dead) since Mon 2026-02-02 10:25:01 UTC; 5s ago
   Duration: 77ms
TriggeredBy: ● 29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8-70356c1a3b924523.timer
    Process: 296714 ExecStart=/usr/bin/podman healthcheck run 29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8 (code=exited, status=0/SUCCESS)
   Main PID: 296714 (code=exited, status=0/SUCCESS)
        CPU: 81ms

Feb 02 10:25:01 compute-0 podman[296714]: 2026-02-02 10:25:01.343555891 +0000 UTC m=+0.055312007 container health_status 29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:7c305a77ab65247f0dc2ea1616c427b173cb95f37bb37e34c631d9615a73d2cc, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, config_id=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': 'db4758ee7523fe447444c4bd2b867Unit apparmor.service could not be found.
Unit apt-daily.service could not be found.
Unit auto-cpufreq.service could not be found.
Unit autofs.service could not be found.
b543b1eee4e3bbcf6676cd1b27bf6147d86-65dc6d8b666d074a9e865f271939acafedbf905c57c15ae47c3f2766afb95121-65dc6d8b666d074a9e865f271939acafedbf905c57c15ae47c3f2766afb95121-65dc6d8b666d074a9e865f271939acafedbf905c57c15ae47c3f2766afb95121-0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:7c305a77ab65247f0dc2ea1616c427b173cb95f37bb37e34c631d9615a73d2cc', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/openstack/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.build-date=20260127, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)

● auditd.service - Security Auditing Service
     Loaded: loaded (/usr/lib/systemd/system/auditd.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:auditd(8)
             https://github.com/linux-audit/audit-documentation
   Main PID: 701 (auditd)
         IO: 4.0K read, 23.3M written
      Tasks: 4 (limit: 48560)
     Memory: 16.3M (peak: 16.7M)
        CPU: 5.612s
     CGroup: /system.slice/auditd.service
             ├─701 /sbin/auditd
             └─703 /usr/sbin/sedispatch

Feb 02 09:00:45 localhost augenrules[721]: failure 1
Feb 02 09:00:45 localhost augenrules[721]: pid 701
Feb 02 09:00:45 localhost augenrules[721]: rate_limit 0
Feb 02 09:00:45 localhost augenrules[721]: backlog_limit 8192
Feb 02 09:00:45 localhost augenrules[721]: lost 0
Feb 02 09:00:45 localhost augenrules[721]: backlog 3
Feb 02 09:00:45 localhost augenrules[721]: backlog_wait_time 60000
Feb 02 09:00:45 localhost augenrules[721]: backlog_wait_time_actual 0
Feb 02 09:00:45 localhost systemd[1]: Started Security Auditing Service.
Feb 02 09:53:23 compute-0 auditd[701]: Audit daemon rotating log files

○ auth-rpcgss-module.service - Kernel Module supporting RPCSEC_GSS
     Loaded: loaded (/usr/lib/systemd/system/auth-rpcgss-module.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 09:00:45 UTC; 1h 24min ago

○ blk-availability.service - Availability of block devices
     Loaded: loaded (/usr/lib/systemd/system/blk-availability.service; disabled; preset: disabled)
     Active: inactive (dead)

● ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@alertmanager.compute-0.service - Ceph alertmanager.compute-0 for d241d473-9fcb-5f74-b163-f1ca4454e7f1
     Loaded: loaded (/etc/systemd/system/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 09:42:37 UTC; 42min ago
   Main PID: 104366 (conmon)
         IO: 3.0M read, 245.0K written
      Tasks: 14 (limit: 48560)
     Memory: 31.0M (peak: 42.0M)
        CPU: 5.813s
     CGroup: /system.slice/system-ceph\x2dd241d473\x2d9fcb\x2d5f74\x2db163\x2df1ca4454e7f1.slice/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@alertmanager.compute-0.service
             ├─libpod-payload-63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4
             │ ├─104368 /run/podman-init -- /bin/alertmanager --cluster.listen-address=:9094 --web.listen-address=192.168.122.100:9093 --config.file=/etc/alertmanager/alertmanager.yml
             │ └─104370 /bin/alertmanager --cluster.listen-address=:9094 --web.listen-address=192.168.122.100:9093 --config.file=/etc/alertmanager/alertmanager.yml
             └─runtime
               └─104366 /usr/bin/conmon --api-version 1 -c 63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4 -u 63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4/userdata -p /run/containers/storage/overlay-containers/63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-alertmanager-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@alertmanager.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4

Feb 02 10:24:37 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-alertmanager-compute-0[104366]: ts=2026-02-02T10:24:37.270Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[2]: notify retry canceled after 2 attempts: Post \"http://compute-2.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded; ceph-dashboard/webhook[1]: notify retry canceled after 2 attempts: Post \"http://compute-1.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded"
Feb 02 10:24:38 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-alertmanager-compute-0[104366]: ts=2026-02-02T10:24:38.914Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"http://compute-1.ctlplane.example.com:8443/api/prometheus_receiver\": dial tcp 192.168.122.101:8443: i/o timeout"
Feb 02 10:24:38 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-alertmanager-compute-0[104366]: ts=2026-02-02T10:24:38.915Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[2] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"http://compute-2.ctlplane.example.com:8443/api/prometheus_receiver\": dial tcp 192.168.122.102:8443: i/o timeout"
Feb 02 10:24:38 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-alertmanager-compute-0[104366]: ts=2026-02-02T10:24:38.915Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[1]: notify retry canceled after 3 attempts: Post \"http://compute-1.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded; ceph-dashboard/webhook[2]: notify retry canceled after 2 attempts: Post \"http://compute-2.ctlplane.example.com:8443/api/prometheus_receiver\": dial tcp 192.168.122.102:8443: i/o timeout"
Feb 02 10:24:47 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-alertmanager-compute-0[104366]: ts=2026-02-02T10:24:47.270Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[2]: notify retry canceled after 2 attempts: Post \"http://compute-2.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded; ceph-dashboard/webhook[1]: notify retry canceled after 2 attempts: Post \"http://compute-1.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded"
Feb 02 10:24:48 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-alertmanager-compute-0[104366]: ts=2026-02-02T10:24:48.917Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[2] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"http://compute-2.ctlplane.example.com:8443/api/prometheus_receiver\": dial tcp 192.168.122.102:8443: i/o timeout"
Feb 02 10:24:48 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-alertmanager-compute-0[104366]: ts=2026-02-02T10:24:48.917Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"http://compute-1.ctlplane.example.com:8443/api/prometheus_receiver\": dial tcp 192.168.122.101:8443: i/o timeout"
Feb 02 10:24:48 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-alertmanager-compute-0[104366]: ts=2026-02-02T10:24:48.917Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[2]: notify retry canceled after 2 attempts: Post \"http://compute-2.ctlplane.example.com:8443/api/prometheus_receiver\": dial tcp 192.168.122.102:8443: i/o timeout; ceph-dashboard/webhook[1]: notify retry canceled after 2 attempts: Post \"http://compute-1.ctlplane.example.com:8443/api/prometheus_receiver\": dial tcp 192.168.122.101:8443: i/o timeout"
Feb 02 10:24:57 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-alertmanager-compute-0[104366]: ts=2026-02-02T10:24:57.273Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[2]: notify retry canceled after 2 attempts: Post \"http://compute-2.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded; ceph-dashboard/webhook[1]: notify retry canceled after 2 attempts: Post \"http://compute-1.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded"
Feb 02 10:24:58 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-alertmanager-compute-0[104366]: ts=2026-02-02T10:24:58.917Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[2]: notify retry canceled after 2 attempts: Post \"http://compute-2.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded; ceph-dashboard/webhook[1]: notify retry canceled after 2 attempts: Post \"http://compute-1.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded"

● ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@crash.compute-0.service - Ceph crash.compute-0 for d241d473-9fcb-5f74-b163-f1ca4454e7f1
     Loaded: loaded (/etc/systemd/system/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 09:38:22 UTC; 46min ago
   Main PID: 79739 (conmon)
         IO: 0B read, 176.0K written
      Tasks: 3 (limit: 48560)
     Memory: 7.7M (peak: 24.4M)
        CPU: 573ms
     CGroup: /system.slice/system-ceph\x2dd241d473\x2d9fcb\x2d5f74\x2db163\x2df1ca4454e7f1.slice/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@crash.compute-0.service
             ├─libpod-payload-318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2
             │ ├─79741 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ └─79743 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             └─runtime
               └─79739 /usr/bin/conmon --api-version 1 -c 318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2 -u 318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2/userdata -p /run/containers/storage/overlay-containers/318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2

Feb 02 09:38:22 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-crash-compute-0[79739]: 2026-02-02T09:38:22.231+0000 7f1b36d78640 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory
Feb 02 09:38:22 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-crash-compute-0[79739]: 2026-02-02T09:38:22.231+0000 7f1b36d78640 -1 AuthRegistry(0x7f1b36d76ff0) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx
Feb 02 09:38:22 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-crash-compute-0[79739]: 2026-02-02T09:38:22.232+0000 7f1b34aed640 -1 monclient(hunting): handle_auth_bad_method server allowed_methods [2] but i only support [1]
Feb 02 09:38:22 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-crash-compute-0[79739]: 2026-02-02T09:38:22.232+0000 7f1b36d78640 -1 monclient: authenticate NOTE: no keyring found; disabled cephx authentication
Feb 02 09:38:22 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-crash-compute-0[79739]: [errno 13] RADOS permission denied (error connecting to the cluster)
Feb 02 09:38:22 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-crash-compute-0[79739]: INFO:ceph-crash:monitoring path /var/lib/ceph/crash, delay 600s
Feb 02 09:48:22 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-crash-compute-0[79739]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Feb 02 09:58:22 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-crash-compute-0[79739]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Feb 02 10:08:22 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-crash-compute-0[79739]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'
Feb 02 10:18:22 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-crash-compute-0[79739]: ERROR:ceph-crash:Error scraping /var/lib/ceph/crash: [Errno 13] Permission denied: '/var/lib/ceph/crash'

● ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@grafana.compute-0.service - Ceph grafana.compute-0 for d241d473-9fcb-5f74-b163-f1ca4454e7f1
     Loaded: loaded (/etc/systemd/system/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 09:42:39 UTC; 42min ago
   Main PID: 104673 (conmon)
         IO: 24.9M read, 297.0K written
      Tasks: 19 (limit: 48560)
     Memory: 107.3M (peak: 147.8M)
        CPU: 37.913s
     CGroup: /system.slice/system-ceph\x2dd241d473\x2d9fcb\x2d5f74\x2db163\x2df1ca4454e7f1.slice/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@grafana.compute-0.service
             ├─libpod-payload-207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2
             │ ├─104675 /run/podman-init -- /run.sh
             │ └─104677 grafana server --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini --packaging=docker cfg:default.log.mode=console cfg:default.paths.data=/var/lib/grafana cfg:default.paths.logs=/var/log/grafana cfg:default.paths.plugins=/var/lib/grafana/plugins cfg:default.paths.provisioning=/etc/grafana/provisioning
             └─runtime
               └─104673 /usr/bin/conmon --api-version 1 -c 207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2 -u 207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2/userdata -p /run/containers/storage/overlay-containers/207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-grafana-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@grafana.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2

Feb 02 10:02:39 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-grafana-compute-0[104673]: logger=cleanup t=2026-02-02T10:02:39.506562222Z level=info msg="Completed cleanup jobs" duration=39.241957ms
Feb 02 10:02:39 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-grafana-compute-0[104673]: logger=grafana.update.checker t=2026-02-02T10:02:39.591577991Z level=info msg="Update check succeeded" duration=49.639635ms
Feb 02 10:02:39 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-grafana-compute-0[104673]: logger=plugins.update.checker t=2026-02-02T10:02:39.597775267Z level=info msg="Update check succeeded" duration=53.033456ms
Feb 02 10:12:39 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-grafana-compute-0[104673]: logger=cleanup t=2026-02-02T10:12:39.491304851Z level=info msg="Completed cleanup jobs" duration=23.875611ms
Feb 02 10:12:39 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-grafana-compute-0[104673]: logger=plugins.update.checker t=2026-02-02T10:12:39.593651488Z level=info msg="Update check succeeded" duration=48.991286ms
Feb 02 10:12:39 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-grafana-compute-0[104673]: logger=grafana.update.checker t=2026-02-02T10:12:39.61301736Z level=info msg="Update check succeeded" duration=70.944316ms
Feb 02 10:14:24 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-grafana-compute-0[104673]: logger=infra.usagestats t=2026-02-02T10:14:24.511979657Z level=info msg="Usage stats are ready to report"
Feb 02 10:22:39 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-grafana-compute-0[104673]: logger=cleanup t=2026-02-02T10:22:39.480840575Z level=info msg="Completed cleanup jobs" duration=12.581824ms
Feb 02 10:22:39 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-grafana-compute-0[104673]: logger=grafana.update.checker t=2026-02-02T10:22:39.595426813Z level=info msg="Update check succeeded" duration=53.270202ms
Feb 02 10:22:39 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-grafana-compute-0[104673]: logger=plugins.update.checker t=2026-02-02T10:22:39.598359081Z level=info msg="Update check succeeded" duration=53.326674ms

● ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@haproxy.nfs.cephfs.compute-0.ooxkuo.service - Ceph haproxy.nfs.cephfs.compute-0.ooxkuo for d241d473-9fcb-5f74-b163-f1ca4454e7f1
     Loaded: loaded (/etc/systemd/system/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 09:41:22 UTC; 43min ago
   Main PID: 97796 (conmon)
         IO: 0B read, 177.5K written
      Tasks: 11 (limit: 48560)
     Memory: 8.8M (peak: 20.2M)
        CPU: 3.816s
     CGroup: /system.slice/system-ceph\x2dd241d473\x2d9fcb\x2d5f74\x2db163\x2df1ca4454e7f1.slice/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@haproxy.nfs.cephfs.compute-0.ooxkuo.service
             ├─libpod-payload-5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e
             │ ├─97798 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ ├─97800 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ └─97802 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             └─runtime
               └─97796 /usr/bin/conmon --api-version 1 -c 5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e -u 5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e/userdata -p /run/containers/storage/overlay-containers/5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-nfs-cephfs-compute-0-ooxkuo --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@haproxy.nfs.cephfs.compute-0.ooxkuo.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e

Feb 02 10:05:38 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-nfs-cephfs-compute-0-ooxkuo[97796]: [WARNING] 032/100538 (4) : Server backend/nfs.cephfs.1 is DOWN, reason: Layer4 connection problem, info: "Connection refused", check duration: 0ms. 2 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue.
Feb 02 10:06:00 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-nfs-cephfs-compute-0-ooxkuo[97796]: [WARNING] 032/100600 (4) : Server backend/nfs.cephfs.1 is UP, reason: Layer4 check passed, check duration: 0ms. 3 active and 0 backup servers online. 0 sessions requeued, 0 total in queue.
Feb 02 10:07:42 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-nfs-cephfs-compute-0-ooxkuo[97796]: [WARNING] 032/100742 (4) : Server backend/nfs.cephfs.2 is DOWN, reason: Layer4 connection problem, info: "Connection refused", check duration: 0ms. 2 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue.
Feb 02 10:07:44 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-nfs-cephfs-compute-0-ooxkuo[97796]: [WARNING] 032/100744 (4) : Server backend/nfs.cephfs.1 is DOWN, reason: Layer4 connection problem, info: "Connection refused", check duration: 0ms. 1 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue.
Feb 02 10:08:04 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-nfs-cephfs-compute-0-ooxkuo[97796]: [WARNING] 032/100804 (4) : Server backend/nfs.cephfs.2 is UP, reason: Layer4 check passed, check duration: 0ms. 2 active and 0 backup servers online. 0 sessions requeued, 0 total in queue.
Feb 02 10:08:06 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-nfs-cephfs-compute-0-ooxkuo[97796]: [WARNING] 032/100806 (4) : Server backend/nfs.cephfs.1 is UP, reason: Layer4 check passed, check duration: 0ms. 3 active and 0 backup servers online. 0 sessions requeued, 0 total in queue.
Feb 02 10:09:44 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-nfs-cephfs-compute-0-ooxkuo[97796]: [WARNING] 032/100944 (4) : Server backend/nfs.cephfs.0 is DOWN, reason: Layer4 connection problem, info: "Connection refused", check duration: 0ms. 2 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue.
Feb 02 10:11:31 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-nfs-cephfs-compute-0-ooxkuo[97796]: [WARNING] 032/101131 (4) : Server backend/nfs.cephfs.1 is DOWN, reason: Layer4 connection problem, info: "Connection refused", check duration: 0ms. 1 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue.
Feb 02 10:11:49 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-nfs-cephfs-compute-0-ooxkuo[97796]: [WARNING] 032/101149 (4) : Server backend/nfs.cephfs.2 is DOWN, reason: Layer4 connection problem, info: "Connection refused", check duration: 0ms. 0 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue.
Feb 02 10:11:49 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-nfs-cephfs-compute-0-ooxkuo[97796]: [ALERT] 032/101149 (4) : backend 'backend' has no server available!

● ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@haproxy.rgw.default.compute-0.avekxu.service - Ceph haproxy.rgw.default.compute-0.avekxu for d241d473-9fcb-5f74-b163-f1ca4454e7f1
     Loaded: loaded (/etc/systemd/system/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 09:40:22 UTC; 44min ago
   Main PID: 90403 (conmon)
         IO: 4.6M read, 4.7M written
      Tasks: 11 (limit: 48560)
     Memory: 10.6M (peak: 24.1M)
        CPU: 4.337s
     CGroup: /system.slice/system-ceph\x2dd241d473\x2d9fcb\x2d5f74\x2db163\x2df1ca4454e7f1.slice/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@haproxy.rgw.default.compute-0.avekxu.service
             ├─libpod-payload-19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190
             │ ├─90405 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ ├─90407 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ └─90409 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             └─runtime
               └─90403 /usr/bin/conmon --api-version 1 -c 19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190 -u 19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190/userdata -p /run/containers/storage/overlay-containers/19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-rgw-default-compute-0-avekxu --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@haproxy.rgw.default.compute-0.avekxu.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190

Feb 02 09:40:22 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-rgw-default-compute-0-avekxu[90403]: [NOTICE] 032/094022 (2) : New worker #1 (4) forked
Feb 02 09:40:22 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-rgw-default-compute-0-avekxu[90403]: [WARNING] 032/094022 (4) : Server backend/rgw.rgw.compute-0.vltabo is DOWN, reason: Layer4 connection problem, info: "Connection refused", check duration: 0ms. 2 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue.
Feb 02 09:40:22 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-rgw-default-compute-0-avekxu[90403]: [WARNING] 032/094022 (4) : Server backend/rgw.rgw.compute-1.ezjvcf is DOWN, reason: Layer4 connection problem, info: "Connection refused", check duration: 0ms. 1 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue.
Feb 02 09:40:23 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-rgw-default-compute-0-avekxu[90403]: [WARNING] 032/094023 (4) : Server backend/rgw.rgw.compute-2.zjyufj is DOWN, reason: Layer4 connection problem, info: "Connection refused", check duration: 0ms. 0 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue.
Feb 02 09:40:23 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-rgw-default-compute-0-avekxu[90403]: [NOTICE] 032/094023 (4) : haproxy version is 2.3.17-d1c9119
Feb 02 09:40:23 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-rgw-default-compute-0-avekxu[90403]: [NOTICE] 032/094023 (4) : path to executable is /usr/local/sbin/haproxy
Feb 02 09:40:23 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-rgw-default-compute-0-avekxu[90403]: [ALERT] 032/094023 (4) : backend 'backend' has no server available!
Feb 02 09:40:26 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-rgw-default-compute-0-avekxu[90403]: [WARNING] 032/094026 (4) : Server backend/rgw.rgw.compute-1.ezjvcf is UP, reason: Layer7 check passed, code: 200, check duration: 1ms. 1 active and 0 backup servers online. 0 sessions requeued, 0 total in queue.
Feb 02 09:40:27 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-rgw-default-compute-0-avekxu[90403]: [WARNING] 032/094027 (4) : Server backend/rgw.rgw.compute-2.zjyufj is UP, reason: Layer7 check passed, code: 200, check duration: 1ms. 2 active and 0 backup servers online. 0 sessions requeued, 0 total in queue.
Feb 02 09:40:28 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-rgw-default-compute-0-avekxu[90403]: [WARNING] 032/094028 (4) : Server backend/rgw.rgw.compute-0.vltabo is UP, reason: Layer7 check passed, code: 200, check duration: 1ms. 3 active and 0 backup servers online. 0 sessions requeued, 0 total in queue.

● ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@keepalived.nfs.cephfs.compute-0.pqolko.service - Ceph keepalived.nfs.cephfs.compute-0.pqolko for d241d473-9fcb-5f74-b163-f1ca4454e7f1
     Loaded: loaded (/etc/systemd/system/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 09:41:33 UTC; 43min ago
   Main PID: 98163 (conmon)
         IO: 14.2M read, 188.5K written
      Tasks: 4 (limit: 48560)
     Memory: 17.2M (peak: 20.3M)
        CPU: 11.683s
     CGroup: /system.slice/system-ceph\x2dd241d473\x2d9fcb\x2d5f74\x2db163\x2df1ca4454e7f1.slice/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@keepalived.nfs.cephfs.compute-0.pqolko.service
             ├─libpod-payload-5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173
             │ ├─98165 /run/podman-init -- ./init.sh
             │ ├─98167 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ └─98169 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             └─runtime
               └─98163 /usr/bin/conmon --api-version 1 -c 5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173 -u 5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173/userdata -p /run/containers/storage/overlay-containers/5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-nfs-cephfs-compute-0-pqolko --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@keepalived.nfs.cephfs.compute-0.pqolko.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173

Feb 02 09:41:33 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-nfs-cephfs-compute-0-pqolko[98163]: Mon Feb  2 09:41:33 2026: NOTICE: setting config option max_auto_priority should result in better keepalived performance
Feb 02 09:41:33 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-nfs-cephfs-compute-0-pqolko[98163]: Mon Feb  2 09:41:33 2026: Starting VRRP child process, pid=4
Feb 02 09:41:33 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-nfs-cephfs-compute-0-pqolko[98163]: Mon Feb  2 09:41:33 2026: Startup complete
Feb 02 09:41:33 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-nfs-cephfs-compute-0-pqolko[98163]: Mon Feb  2 09:41:33 2026: (VI_0) Entering BACKUP STATE (init)
Feb 02 09:41:33 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-nfs-cephfs-compute-0-pqolko[98163]: Mon Feb  2 09:41:33 2026: VRRP_Script(check_backend) succeeded
Feb 02 09:41:37 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-nfs-cephfs-compute-0-pqolko[98163]: Mon Feb  2 09:41:37 2026: (VI_0) Entering MASTER STATE
Feb 02 09:41:42 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-nfs-cephfs-compute-0-pqolko[98163]: Mon Feb  2 09:41:42 2026: (VI_0) Received advert from 192.168.122.101 with lower priority 90, ours 100, forcing new election
Feb 02 09:42:03 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-nfs-cephfs-compute-0-pqolko[98163]: Mon Feb  2 09:42:03 2026: (VI_0) Entering BACKUP STATE
Feb 02 09:42:04 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-nfs-cephfs-compute-0-pqolko[98163]: Mon Feb  2 09:42:04 2026: (VI_0) Entering MASTER STATE
Feb 02 09:42:05 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-nfs-cephfs-compute-0-pqolko[98163]: Mon Feb  2 09:42:05 2026: (VI_0) received an invalid passwd!

● ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@keepalived.rgw.default.compute-0.pxmjnp.service - Ceph keepalived.rgw.default.compute-0.pxmjnp for d241d473-9fcb-5f74-b163-f1ca4454e7f1
     Loaded: loaded (/etc/systemd/system/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 09:42:03 UTC; 43min ago
   Main PID: 99763 (conmon)
         IO: 0B read, 143.5K written
      Tasks: 4 (limit: 48560)
     Memory: 2.9M (peak: 23.9M)
        CPU: 10.865s
     CGroup: /system.slice/system-ceph\x2dd241d473\x2d9fcb\x2d5f74\x2db163\x2df1ca4454e7f1.slice/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@keepalived.rgw.default.compute-0.pxmjnp.service
             ├─libpod-payload-860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e
             │ ├─99765 /run/podman-init -- ./init.sh
             │ ├─99767 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ └─99769 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             └─runtime
               └─99763 /usr/bin/conmon --api-version 1 -c 860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e -u 860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e/userdata -p /run/containers/storage/overlay-containers/860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-rgw-default-compute-0-pxmjnp --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@keepalived.rgw.default.compute-0.pxmjnp.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e

Feb 02 09:42:03 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-rgw-default-compute-0-pxmjnp[99763]: Mon Feb  2 09:42:03 2026: Configuration file /etc/keepalived/keepalived.conf
Feb 02 09:42:03 compute-0 systemd[1]: Started Ceph keepalived.rgw.default.compute-0.pxmjnp for d241d473-9fcb-5f74-b163-f1ca4454e7f1.
Feb 02 09:42:03 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-rgw-default-compute-0-pxmjnp[99763]: Mon Feb  2 09:42:03 2026: Failed to bind to process monitoring socket - errno 98 - Address already in use
Feb 02 09:42:03 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-rgw-default-compute-0-pxmjnp[99763]: Mon Feb  2 09:42:03 2026: NOTICE: setting config option max_auto_priority should result in better keepalived performance
Feb 02 09:42:03 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-rgw-default-compute-0-pxmjnp[99763]: Mon Feb  2 09:42:03 2026: Starting VRRP child process, pid=4
Feb 02 09:42:03 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-rgw-default-compute-0-pxmjnp[99763]: Mon Feb  2 09:42:03 2026: Startup complete
Feb 02 09:42:03 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-rgw-default-compute-0-pxmjnp[99763]: Mon Feb  2 09:42:03 2026: (VI_0) Entering BACKUP STATE (init)
Feb 02 09:42:03 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-rgw-default-compute-0-pxmjnp[99763]: Mon Feb  2 09:42:03 2026: VRRP_Script(check_backend) succeeded
Feb 02 09:42:05 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-rgw-default-compute-0-pxmjnp[99763]: Mon Feb  2 09:42:05 2026: (VI_0) received lower priority (90) advert from 192.168.122.102 - discarding
Feb 02 09:42:07 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-rgw-default-compute-0-pxmjnp[99763]: Mon Feb  2 09:42:07 2026: (VI_0) Entering MASTER STATE

● ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mds.cephfs.compute-0.clmmzw.service - Ceph mds.cephfs.compute-0.clmmzw for d241d473-9fcb-5f74-b163-f1ca4454e7f1
     Loaded: loaded (/etc/systemd/system/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 09:41:06 UTC; 44min ago
   Main PID: 96757 (conmon)
         IO: 0B read, 177.5K written
      Tasks: 18 (limit: 48560)
     Memory: 29.4M (peak: 29.9M)
        CPU: 1.591s
     CGroup: /system.slice/system-ceph\x2dd241d473\x2d9fcb\x2d5f74\x2db163\x2df1ca4454e7f1.slice/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mds.cephfs.compute-0.clmmzw.service
             ├─libpod-payload-7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10
             │ ├─96759 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.clmmzw -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─96761 /usr/bin/ceph-mds -n mds.cephfs.compute-0.clmmzw -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─96757 /usr/bin/conmon --api-version 1 -c 7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10 -u 7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10/userdata -p /run/containers/storage/overlay-containers/7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-mds-cephfs-compute-0-clmmzw --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mds.cephfs.compute-0.clmmzw.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10

Feb 02 10:24:37 compute-0 ceph-mds[96761]: mds.cephfs.compute-0.clmmzw Can't run that command on an inactive MDS!
Feb 02 10:24:37 compute-0 ceph-mds[96761]: mds.cephfs.compute-0.clmmzw asok_command: dump_ops_in_flight {prefix=dump_ops_in_flight} (starting...)
Feb 02 10:24:37 compute-0 ceph-mds[96761]: mds.cephfs.compute-0.clmmzw Can't run that command on an inactive MDS!
Feb 02 10:24:37 compute-0 ceph-mds[96761]: mds.cephfs.compute-0.clmmzw asok_command: get subtrees {prefix=get subtrees} (starting...)
Feb 02 10:24:37 compute-0 ceph-mds[96761]: mds.cephfs.compute-0.clmmzw Can't run that command on an inactive MDS!
Feb 02 10:24:37 compute-0 ceph-mds[96761]: mds.cephfs.compute-0.clmmzw asok_command: ops {prefix=ops} (starting...)
Feb 02 10:24:37 compute-0 ceph-mds[96761]: mds.cephfs.compute-0.clmmzw Can't run that command on an inactive MDS!
Feb 02 10:24:38 compute-0 ceph-mds[96761]: mds.cephfs.compute-0.clmmzw asok_command: session ls {prefix=session ls} (starting...)
Feb 02 10:24:38 compute-0 ceph-mds[96761]: mds.cephfs.compute-0.clmmzw Can't run that command on an inactive MDS!
Feb 02 10:24:38 compute-0 ceph-mds[96761]: mds.cephfs.compute-0.clmmzw asok_command: status {prefix=status} (starting...)

● ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mgr.compute-0.djvyfo.service - Ceph mgr.compute-0.djvyfo for d241d473-9fcb-5f74-b163-f1ca4454e7f1
     Loaded: loaded (/etc/systemd/system/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 09:37:47 UTC; 47min ago
   Main PID: 74781 (conmon)
         IO: 0B read, 2.9M written
      Tasks: 176 (limit: 48560)
     Memory: 567.1M (peak: 568.1M)
        CPU: 1min 58.688s
     CGroup: /system.slice/system-ceph\x2dd241d473\x2d9fcb\x2d5f74\x2db163\x2df1ca4454e7f1.slice/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mgr.compute-0.djvyfo.service
             ├─libpod-payload-3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc
             │ ├─74783 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.djvyfo -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─74785 /usr/bin/ceph-mgr -n mgr.compute-0.djvyfo -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─74781 /usr/bin/conmon --api-version 1 -c 3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc -u 3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc/userdata -p /run/containers/storage/overlay-containers/3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-mgr-compute-0-djvyfo --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mgr.compute-0.djvyfo.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc

Feb 02 10:25:06 compute-0 ceph-mgr[74785]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Feb 02 10:25:06 compute-0 ceph-mgr[74785]: [pg_autoscaler INFO root] Pool 'default.rgw.control' root_id -1 using 0.0 of space, bias 1.0, pg target 0.0 quantized to 32 (current 32)
Feb 02 10:25:06 compute-0 ceph-mgr[74785]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Feb 02 10:25:06 compute-0 ceph-mgr[74785]: [pg_autoscaler INFO root] Pool 'default.rgw.meta' root_id -1 using 1.2718141564107572e-07 of space, bias 4.0, pg target 0.00015261769876929088 quantized to 32 (current 32)
Feb 02 10:25:06 compute-0 ceph-mgr[74785]: [pg_autoscaler INFO root] effective_target_ratio 0.0 0.0 0 64411926528
Feb 02 10:25:06 compute-0 ceph-mgr[74785]: [pg_autoscaler INFO root] Pool '.nfs' root_id -1 using 6.359070782053786e-08 of space, bias 1.0, pg target 1.907721234616136e-05 quantized to 32 (current 32)
Feb 02 10:25:06 compute-0 ceph-mgr[74785]: log_channel(cluster) log [DBG] : pgmap v1376: 353 pgs: 353 active+clean; 41 MiB data, 303 MiB used, 60 GiB / 60 GiB avail; 511 B/s rd, 0 op/s
Feb 02 10:25:06 compute-0 ceph-mgr[74785]: log_channel(audit) log [DBG] : from='client.29663 -' entity='client.admin' cmd=[{"prefix": "osd df", "output_method": "tree", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Feb 02 10:25:07 compute-0 ceph-mgr[74785]: log_channel(audit) log [DBG] : from='client.29626 -' entity='client.admin' cmd=[{"prefix": "osd blocked-by", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Feb 02 10:25:07 compute-0 ceph-mgr[74785]: log_channel(audit) log [DBG] : from='client.29681 -' entity='client.admin' cmd=[{"prefix": "osd df", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch

● ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mon.compute-0.service - Ceph mon.compute-0 for d241d473-9fcb-5f74-b163-f1ca4454e7f1
     Loaded: loaded (/etc/systemd/system/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 09:37:45 UTC; 47min ago
   Main PID: 74485 (conmon)
         IO: 808.0K read, 497.9M written
      Tasks: 27 (limit: 48560)
     Memory: 104.8M (peak: 121.5M)
        CPU: 1min 1.363s
     CGroup: /system.slice/system-ceph\x2dd241d473\x2d9fcb\x2d5f74\x2db163\x2df1ca4454e7f1.slice/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mon.compute-0.service
             ├─libpod-payload-79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783
             │ ├─74487 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─74489 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             └─runtime
               └─74485 /usr/bin/conmon --api-version 1 -c 79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783 -u 79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783/userdata -p /run/containers/storage/overlay-containers/79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783

Feb 02 10:25:06 compute-0 ceph-mon[74489]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/4140346219' entity='client.admin' cmd=[{"prefix": "mds stat", "format": "json-pretty"}]: dispatch
Feb 02 10:25:06 compute-0 ceph-mon[74489]: from='client.29575 -' entity='client.admin' cmd=[{"prefix": "osd df", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Feb 02 10:25:06 compute-0 ceph-mon[74489]: from='client.? 192.168.122.102:0/1205761716' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json-pretty"}]: dispatch
Feb 02 10:25:06 compute-0 ceph-mon[74489]: from='client.19830 -' entity='client.admin' cmd=[{"prefix": "fs status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Feb 02 10:25:06 compute-0 ceph-mon[74489]: from='client.29633 -' entity='client.admin' cmd=[{"prefix": "osd blocked-by", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Feb 02 10:25:06 compute-0 ceph-mon[74489]: from='client.? 192.168.122.102:0/3400586964' entity='client.admin' cmd=[{"prefix": "osd numa-status", "format": "json-pretty"}]: dispatch
Feb 02 10:25:06 compute-0 ceph-mon[74489]: from='client.? 192.168.122.100:0/4140346219' entity='client.admin' cmd=[{"prefix": "mds stat", "format": "json-pretty"}]: dispatch
Feb 02 10:25:06 compute-0 ceph-mon[74489]: from='client.? 192.168.122.101:0/272857927' entity='client.admin' cmd=[{"prefix": "osd blocklist ls", "format": "json-pretty"}]: dispatch
Feb 02 10:25:06 compute-0 ceph-mon[74489]: mon.compute-0@0(leader) e3 handle_command mon_command({"prefix": "mon dump", "format": "json-pretty"} v 0)
Feb 02 10:25:06 compute-0 ceph-mon[74489]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/779687817' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json-pretty"}]: dispatch

● ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@nfs.cephfs.2.0.compute-0.fdwwab.service - Ceph nfs.cephfs.2.0.compute-0.fdwwab for d241d473-9fcb-5f74-b163-f1ca4454e7f1
     Loaded: loaded (/etc/systemd/system/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 10:11:55 UTC; 13min ago
    Process: 270821 ExecStartPre=/bin/rm -f /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@nfs.cephfs.2.0.compute-0.fdwwab.service-pid /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@nfs.cephfs.2.0.compute-0.fdwwab.service-cid (code=exited, status=0/SUCCESS)
    Process: 270822 ExecStart=/bin/bash /var/lib/ceph/d241d473-9fcb-5f74-b163-f1ca4454e7f1/nfs.cephfs.2.0.compute-0.fdwwab/unit.run (code=exited, status=0/SUCCESS)
   Main PID: 270907 (conmon)
         IO: 0B read, 191.5K written
      Tasks: 34 (limit: 48560)
     Memory: 16.8M (peak: 21.1M)
        CPU: 1.188s
     CGroup: /system.slice/system-ceph\x2dd241d473\x2d9fcb\x2d5f74\x2db163\x2df1ca4454e7f1.slice/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@nfs.cephfs.2.0.compute-0.fdwwab.service
             ├─libpod-payload-47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9
             │ ├─270910 /run/podman-init -- /usr/bin/ganesha.nfsd -F -L STDERR -N NIV_EVENT
             │ └─270912 /usr/bin/ganesha.nfsd -F -L STDERR -N NIV_EVENT
             └─runtime
               └─270907 /usr/bin/conmon --api-version 1 -c 47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9 -u 47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9/userdata -p /run/containers/storage/overlay-containers/47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-nfs-cephfs-2-0-compute-0-fdwwab --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@nfs.cephfs.2.0.compute-0.fdwwab.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9

Feb 02 10:24:56 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-nfs-cephfs-2-0-compute-0-fdwwab[270907]: 02/02/2026 10:24:55 : epoch 6980786b : compute-0 : ganesha.nfsd-2[main] nfs_try_lift_grace :STATE :EVENT :check grace:reclaim complete(0) clid count(0)
Feb 02 10:24:56 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-nfs-cephfs-2-0-compute-0-fdwwab[270907]: 02/02/2026 10:24:56 : epoch 6980786b : compute-0 : ganesha.nfsd-2[main] rados_cluster_grace_enforcing :CLIENT ID :EVENT :rados_cluster_grace_enforcing: ret=-45
Feb 02 10:25:01 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-nfs-cephfs-2-0-compute-0-fdwwab[270907]: 02/02/2026 10:25:00 : epoch 6980786b : compute-0 : ganesha.nfsd-2[main] nfs_start_grace :STATE :EVENT :NFS Server Now IN GRACE, duration 90
Feb 02 10:25:01 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-nfs-cephfs-2-0-compute-0-fdwwab[270907]: 02/02/2026 10:25:00 : epoch 6980786b : compute-0 : ganesha.nfsd-2[main] nfs_start_grace :STATE :EVENT :grace reload client info completed from backend
Feb 02 10:25:01 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-nfs-cephfs-2-0-compute-0-fdwwab[270907]: 02/02/2026 10:25:00 : epoch 6980786b : compute-0 : ganesha.nfsd-2[main] nfs_try_lift_grace :STATE :EVENT :check grace:reclaim complete(0) clid count(0)
Feb 02 10:25:01 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-nfs-cephfs-2-0-compute-0-fdwwab[270907]: 02/02/2026 10:25:01 : epoch 6980786b : compute-0 : ganesha.nfsd-2[main] rados_cluster_grace_enforcing :CLIENT ID :EVENT :rados_cluster_grace_enforcing: ret=-45
Feb 02 10:25:06 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-nfs-cephfs-2-0-compute-0-fdwwab[270907]: 02/02/2026 10:25:06 : epoch 6980786b : compute-0 : ganesha.nfsd-2[main] nfs_start_grace :STATE :EVENT :NFS Server Now IN GRACE, duration 90
Feb 02 10:25:06 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-nfs-cephfs-2-0-compute-0-fdwwab[270907]: 02/02/2026 10:25:06 : epoch 6980786b : compute-0 : ganesha.nfsd-2[main] nfs_start_grace :STATE :EVENT :grace reload client info completed from backend
Feb 02 10:25:06 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-nfs-cephfs-2-0-compute-0-fdwwab[270907]: 02/02/2026 10:25:06 : epoch 6980786b : compute-0 : ganesha.nfsd-2[main] nfs_try_lift_grace :STATE :EVENT :check grace:reclaim complete(0) clid count(0)
Feb 02 10:25:06 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-nfs-cephfs-2-0-compute-0-fdwwab[270907]: 02/02/2026 10:25:06 : epoch 6980786b : compute-0 : ganesha.nfsd-2[main] rados_cluster_grace_enforcing :CLIENT ID :EVENT :rados_cluster_grace_enforcing: ret=-45

● ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@node-exporter.compute-0.service - Ceph node-exporter.compute-0 for d241d473-9fcb-5f74-b163-f1ca4454e7f1
     Loaded: loaded (/etc/systemd/system/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 09:42:35 UTC; 42min ago
   Main PID: 104072 (conmon)
         IO: 2.1M read, 184.0K written
      Tasks: 7 (limit: 48560)
     Memory: 16.9M (peak: 24.6M)
        CPU: 8.300s
     CGroup: /system.slice/system-ceph\x2dd241d473\x2d9fcb\x2d5f74\x2db163\x2df1ca4454e7f1.slice/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@node-exporter.compute-0.service
             ├─libpod-payload-690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998
             │ ├─104074 /run/podman-init -- /bin/node_exporter --no-collector.timex --web.listen-address=:9100 --path.procfs=/host/proc --path.sysfs=/host/sys --path.rootfs=/rootfs
             │ └─104076 /bin/node_exporter --no-collector.timex --web.listen-address=:9100 --path.procfs=/host/proc --path.sysfs=/host/sys --path.rootfs=/rootfs
             └─runtime
               └─104072 /usr/bin/conmon --api-version 1 -c 690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998 -u 690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998/userdata -p /run/containers/storage/overlay-containers/690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-node-exporter-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@node-exporter.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998

Feb 02 09:42:35 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-node-exporter-compute-0[104072]: ts=2026-02-02T09:42:35.163Z caller=node_exporter.go:117 level=info collector=udp_queues
Feb 02 09:42:35 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-node-exporter-compute-0[104072]: ts=2026-02-02T09:42:35.163Z caller=node_exporter.go:117 level=info collector=uname
Feb 02 09:42:35 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-node-exporter-compute-0[104072]: ts=2026-02-02T09:42:35.163Z caller=node_exporter.go:117 level=info collector=vmstat
Feb 02 09:42:35 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-node-exporter-compute-0[104072]: ts=2026-02-02T09:42:35.163Z caller=node_exporter.go:117 level=info collector=xfs
Feb 02 09:42:35 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-node-exporter-compute-0[104072]: ts=2026-02-02T09:42:35.163Z caller=node_exporter.go:117 level=info collector=zfs
Feb 02 09:42:35 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-node-exporter-compute-0[104072]: ts=2026-02-02T09:42:35.164Z caller=tls_config.go:274 level=info msg="Listening on" address=[::]:9100
Feb 02 09:42:35 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-node-exporter-compute-0[104072]: ts=2026-02-02T09:42:35.164Z caller=tls_config.go:277 level=info msg="TLS is disabled." http2=false address=[::]:9100
Feb 02 09:42:35 compute-0 bash[104056]: 690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998
Feb 02 09:42:35 compute-0 podman[104056]: 2026-02-02 09:42:35.08058303 +0000 UTC m=+0.015961937 image pull 72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e quay.io/prometheus/node-exporter:v1.7.0
Feb 02 09:42:35 compute-0 systemd[1]: Started Ceph node-exporter.compute-0 for d241d473-9fcb-5f74-b163-f1ca4454e7f1.

● ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@osd.1.service - Ceph osd.1 for d241d473-9fcb-5f74-b163-f1ca4454e7f1
     Loaded: loaded (/etc/systemd/system/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 09:39:07 UTC; 45min ago
   Main PID: 82701 (conmon)
         IO: 151.2M read, 1.7G written
      Tasks: 60 (limit: 48560)
     Memory: 524.7M (peak: 568.2M)
        CPU: 28.804s
     CGroup: /system.slice/system-ceph\x2dd241d473\x2d9fcb\x2d5f74\x2db163\x2df1ca4454e7f1.slice/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@osd.1.service
             ├─libpod-payload-4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763
             │ ├─82703 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─82705 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─82701 /usr/bin/conmon --api-version 1 -c 4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763 -u 4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763/userdata -p /run/containers/storage/overlay-containers/4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763

Feb 02 10:24:42 compute-0 ceph-osd[82705]: do_command 'counter schema' '{prefix=counter schema}' result is 0 bytes
Feb 02 10:24:42 compute-0 ceph-osd[82705]: prioritycache tune_memory target: 4294967296 mapped: 108494848 unmapped: 30081024 heap: 138575872 old mem: 2845415832 new mem: 2845415832
Feb 02 10:24:42 compute-0 ceph-osd[82705]: monclient: tick
Feb 02 10:24:42 compute-0 ceph-osd[82705]: monclient: _check_auth_tickets
Feb 02 10:24:42 compute-0 ceph-osd[82705]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-02-02T10:24:10.567543+0000)
Feb 02 10:24:42 compute-0 ceph-osd[82705]: prioritycache tune_memory target: 4294967296 mapped: 108519424 unmapped: 30056448 heap: 138575872 old mem: 2845415832 new mem: 2845415832
Feb 02 10:24:42 compute-0 ceph-osd[82705]: monclient: tick
Feb 02 10:24:42 compute-0 ceph-osd[82705]: monclient: _check_auth_tickets
Feb 02 10:24:42 compute-0 ceph-osd[82705]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-02-02T10:24:11.567744+0000)
Feb 02 10:24:42 compute-0 ceph-osd[82705]: do_command 'log dump' '{prefix=log dump}'

● ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@prometheus.compute-0.service - Ceph prometheus.compute-0 for d241d473-9fcb-5f74-b163-f1ca4454e7f1
     Loaded: loaded (/etc/systemd/system/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 09:42:09 UTC; 42min ago
   Main PID: 100305 (conmon)
         IO: 10.7M read, 12.1M written
      Tasks: 16 (limit: 48560)
     Memory: 74.1M (peak: 87.2M)
        CPU: 13.600s
     CGroup: /system.slice/system-ceph\x2dd241d473\x2d9fcb\x2d5f74\x2db163\x2df1ca4454e7f1.slice/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@prometheus.compute-0.service
             ├─libpod-payload-214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667
             │ ├─100307 /run/podman-init -- /bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=15d --storage.tsdb.retention.size=0 --web.external-url=http://compute-0.ctlplane.example.com:9095 --web.listen-address=192.168.122.100:9095
             │ └─100309 /bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=15d --storage.tsdb.retention.size=0 --web.external-url=http://compute-0.ctlplane.example.com:9095 --web.listen-address=192.168.122.100:9095
             └─runtime
               └─100305 /usr/bin/conmon --api-version 1 -c 214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667 -u 214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667/userdata -p /run/containers/storage/overlay-containers/214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-prometheus-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@prometheus.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667

Feb 02 09:42:09 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-prometheus-compute-0[100305]: ts=2026-02-02T09:42:09.172Z caller=head.go:698 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=3.441µs
Feb 02 09:42:09 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-prometheus-compute-0[100305]: ts=2026-02-02T09:42:09.172Z caller=head.go:706 level=info component=tsdb msg="Replaying WAL, this may take a while"
Feb 02 09:42:09 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-prometheus-compute-0[100305]: ts=2026-02-02T09:42:09.173Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=0
Feb 02 09:42:09 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-prometheus-compute-0[100305]: ts=2026-02-02T09:42:09.173Z caller=head.go:815 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=42.971µs wal_replay_duration=476.032µs wbl_replay_duration=210ns total_replay_duration=560.085µs
Feb 02 09:42:09 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-prometheus-compute-0[100305]: ts=2026-02-02T09:42:09.177Z caller=main.go:1150 level=info fs_type=XFS_SUPER_MAGIC
Feb 02 09:42:09 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-prometheus-compute-0[100305]: ts=2026-02-02T09:42:09.177Z caller=main.go:1153 level=info msg="TSDB started"
Feb 02 09:42:09 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-prometheus-compute-0[100305]: ts=2026-02-02T09:42:09.177Z caller=main.go:1335 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml
Feb 02 09:42:09 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-prometheus-compute-0[100305]: ts=2026-02-02T09:42:09.212Z caller=main.go:1372 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=34.312235ms db_storage=1.861µs remote_storage=2.01µs web_handler=440ns query_engine=910ns scrape=2.706952ms scrape_sd=313.528µs notify=36.421µs notify_sd=25.191µs rules=30.609447ms tracing=15.27µs
Feb 02 09:42:09 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-prometheus-compute-0[100305]: ts=2026-02-02T09:42:09.212Z caller=main.go:1114 level=info msg="Server is ready to receive web requests."
Feb 02 09:42:09 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-prometheus-compute-0[100305]: ts=2026-02-02T09:42:09.212Z caller=manager.go:163 level=info component="rule manager" msg="Starting rule manager..."

● ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@rgw.rgw.compute-0.vltabo.service - Ceph rgw.rgw.compute-0.vltabo for d241d473-9fcb-5f74-b163-f1ca4454e7f1
     Loaded: loaded (/etc/systemd/system/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 09:40:18 UTC; 44min ago
   Main PID: 89247 (conmon)
         IO: 2.0M read, 4.3M written
      Tasks: 613 (limit: 48560)
     Memory: 123.4M (peak: 124.3M)
        CPU: 20.044s
     CGroup: /system.slice/system-ceph\x2dd241d473\x2d9fcb\x2d5f74\x2db163\x2df1ca4454e7f1.slice/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@rgw.rgw.compute-0.vltabo.service
             ├─libpod-payload-2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f
             │ ├─89250 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.vltabo -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─89254 /usr/bin/radosgw -n client.rgw.rgw.compute-0.vltabo -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─89247 /usr/bin/conmon --api-version 1 -c 2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f -u 2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f/userdata -p /run/containers/storage/overlay-containers/2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-rgw-rgw-compute-0-vltabo --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@rgw.rgw.compute-0.vltabo.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f

Feb 02 10:25:03 compute-0 radosgw[89254]: beast: 0x7f123bf7e5d0: 192.168.122.100 - anonymous [02/Feb/2026:10:25:03.402 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.001000027s
Feb 02 10:25:03 compute-0 radosgw[89254]: ====== starting new request req=0x7f123bf7e5d0 =====
Feb 02 10:25:03 compute-0 radosgw[89254]: ====== req done req=0x7f123bf7e5d0 op status=0 http_status=200 latency=0.001000026s ======
Feb 02 10:25:03 compute-0 radosgw[89254]: beast: 0x7f123bf7e5d0: 192.168.122.102 - anonymous [02/Feb/2026:10:25:03.607 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.001000026s
Feb 02 10:25:05 compute-0 radosgw[89254]: ====== starting new request req=0x7f123bf7e5d0 =====
Feb 02 10:25:05 compute-0 radosgw[89254]: ====== req done req=0x7f123bf7e5d0 op status=0 http_status=200 latency=0.000000000s ======
Feb 02 10:25:05 compute-0 radosgw[89254]: beast: 0x7f123bf7e5d0: 192.168.122.100 - anonymous [02/Feb/2026:10:25:05.406 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.000000000s
Feb 02 10:25:05 compute-0 radosgw[89254]: ====== starting new request req=0x7f123bf7e5d0 =====
Feb 02 10:25:05 compute-0 radosgw[89254]: ====== req done req=0x7f123bf7e5d0 op status=0 http_status=200 latency=0.001000026s ======
Feb 02 10:25:05 compute-0 radosgw[89254]: beast: 0x7f123bf7e5d0: 192.168.122.102 - anonymous [02/Feb/2026:10:25:05.610 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.001000026s

● ceph-osd-losetup-0.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-0.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 09:37:07 UTC; 47min ago
   Main PID: 72551 (code=exited, status=0/SUCCESS)
        CPU: 21ms

Feb 02 09:37:07 compute-0 systemd[1]: Starting Ceph OSD losetup...
Feb 02 09:37:07 compute-0 bash[72552]: /dev/loop3: [64513]:4329562 (/var/lib/ceph-osd-0.img)
Feb 02 09:37:07 compute-0 systemd[1]: Finished Ceph OSD losetup.

● chronyd.service - NTP client/server
     Loaded: loaded (/usr/lib/systemd/system/chronyd.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 09:35:07 UTC; 49min ago
       Docs: man:chronyd(8)
             man:chrony.conf(5)
   Main PID: 58590 (chronyd)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 1.0M (peak: 1.9M)
        CPU: 51ms
     CGroup: /system.slice/chronyd.service
             └─58590 /usr/sbin/chronyd -F 2

Feb 02 09:35:07 compute-0 systemd[1]: Starting NTP client/server...
Feb 02 09:35:07 compute-0 chronyd[58590]: chronyd version 4.8 starting (+CMDMON +REFCLOCK +RTC +PRIVDROP +SCFILTER +SIGND +NTS +SECHASH +IPV6 +DEBUG)
Feb 02 09:35:07 compute-0 chronyd[58590]: Frequency -26.541 +/- 0.392 ppm read from /var/lib/chrony/drift
Feb 02 09:35:07 compute-0 chronyd[58590]: Loaded seccomp filter (level 2)
Feb 02 09:35:07 compute-0 systemd[1]: Started NTP client/server.
Feb 02 09:37:16 compute-0 chronyd[58590]: Selected source 142.4.192.253 (pool.ntp.org)

● cloud-config.service - Cloud-init: Config Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 09:00:50 UTC; 1h 24min ago
   Main PID: 1002 (code=exited, status=0/SUCCESS)
        CPU: 379ms

Feb 02 09:00:50 np0005604790.novalocal systemd[1]: Starting Cloud-init: Config Stage...
Feb 02 09:00:50 np0005604790.novalocal cloud-init[1132]: Cloud-init v. 24.4-8.el9 running 'modules:config' at Mon, 02 Feb 2026 09:00:50 +0000. Up 10.07 seconds.
Feb 02 09:00:50 np0005604790.novalocal systemd[1]: Finished Cloud-init: Config Stage.

● cloud-final.service - Cloud-init: Final Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-final.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 09:00:51 UTC; 1h 24min ago
   Main PID: 1216 (code=exited, status=0/SUCCESS)
        CPU: 431ms

Feb 02 09:00:50 np0005604790.novalocal systemd[1]: Starting Cloud-init: Final Stage...
Feb 02 09:00:51 np0005604790.novalocal cloud-init[1342]: Cloud-init v. 24.4-8.el9 running 'modules:final' at Mon, 02 Feb 2026 09:00:51 +0000. Up 10.46 seconds.
Feb 02 09:00:51 np0005604790.novalocal cloud-init[1351]: #############################################################
Feb 02 09:00:51 np0005604790.novalocal cloud-init[1352]: -----BEGIN SSH HOST KEY FINGERPRINTS-----
Feb 02 09:00:51 np0005604790.novalocal cloud-init[1355]: 256 SHA256:omSMrbvUX6DkbYiDUcfIzqg62PqASGjkHmAkA+Igeug root@np0005604790.novalocal (ECDSA)
Feb 02 09:00:51 np0005604790.novalocal cloud-init[1365]: 3072 SHA256:pe0wca6M1MCvTd5biJ5mLMvoQbSfvZRYmJgb3DwJ8XM root@np0005604790.novalocal (RSA)
Feb 02 09:00:51 np0005604790.novalocal cloud-init[1366]: -----END SSH HOST KEY FINGERPRINTS-----
Feb 02 09:00:51 np0005604790.novalocal cloud-init[1367]: #############################################################
Feb 02 09:00:51 np0005604790.novalocal cloud-init[1342]: Cloud-init v. 24.4-8.el9 finished at Mon, 02 Feb 2026 09:00:51 +0000. Datasource DataSourceConfigDrive [net,ver=2][source=/dev/sr0].  Up 10.63 seconds
Feb 02 09:00:51 np0005604790.novalocal systemd[1]: Finished Cloud-init: Final Stage.

● cloud-init-local.service - Cloud-init: Local Stage (pre-network)
     Loaded: loaded (/usr/lib/systemd/system/cloud-init-local.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 09:00:47 UTC; 1h 24min ago
   Main PID: 784 (code=exited, status=0/SUCCESS)
        CPU: 694ms

Feb 02 09:00:45 localhost systemd[1]: Starting Cloud-init: Local Stage (pre-network)...
Feb 02 09:00:46 localhost cloud-init[839]: Cloud-init v. 24.4-8.el9 running 'init-local' at Mon, 02 Feb 2026 09:00:46 +0000. Up 6.02 seconds.
Feb 02 09:00:47 np0005604790.novalocal systemd[1]: Finished Cloud-init: Local Stage (pre-network).

● cloud-init.service - Cloud-init: Network Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 09:00:50 UTC; 1h 24min ago
   Main PID: 901 (code=exited, status=0/SUCCESS)
        CPU: 1.058s

Feb 02 09:00:50 np0005604790.novalocal cloud-init[921]: |Boo. o .         |
Feb 02 09:00:50 np0005604790.novalocal cloud-init[921]: |=o= + + +        |
Feb 02 09:00:50 np0005604790.novalocal cloud-init[921]: |=E+o o * =       |
Feb 02 09:00:50 np0005604790.novalocal cloud-init[921]: |+= .+ o S o      |
Feb 02 09:00:50 np0005604790.novalocal cloud-init[921]: | oo. . . . .     |
Feb 02 09:00:50 np0005604790.novalocal cloud-init[921]: |o..o      .      |
Feb 02 09:00:50 np0005604790.novalocal cloud-init[921]: |=+=              |
Feb 02 09:00:50 np0005604790.novalocal cloud-init[921]: |B+               |
Feb 02 09:00:50 np0005604790.novalocal cloud-init[921]: +----[SHA256]-----+
Feb 02 09:00:50 np0005604790.novalocal systemd[1]: Finished Cloud-init: Network Stage.

○ cpupower.service - Configure CPU power related settings
     Loaded: loaded (/usr/lib/systemd/system/cpupower.service; disabled; preset: disabled)
     Active: inactive (dead)

● crond.service - Command Scheduler
     Loaded: loaded (/usr/lib/systemd/system/crond.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 09:00:50 UTC; 1h 24min ago
   Main PID: 1009 (crond)
         IO: 36.0K read, 12.0K written
      Tasks: 1 (limit: 48560)
     Memory: 1.3M (peak: 4.8M)
        CPU: 163ms
     CGroup: /system.slice/crond.service
             └─1009 /usr/sbin/crond -n

Feb 02 09:01:01 np0005604790.novalocal run-parts[4315]: (/etc/cron.hourly) finished 0anacron
Feb 02 09:16:01 compute-0 anacron[4313]: Job `cron.daily' started
Unit display-manager.service could not be found.
Feb 02 09:16:01 compute-0 anacron[4313]: Job `cron.daily' terminated
Feb 02 09:36:01 compute-0 anacron[4313]: Job `cron.weekly' started
Feb 02 09:36:01 compute-0 anacron[4313]: Job `cron.weekly' terminated
Feb 02 09:56:01 compute-0 anacron[4313]: Job `cron.monthly' started
Feb 02 09:56:01 compute-0 anacron[4313]: Job `cron.monthly' terminated
Feb 02 09:56:01 compute-0 anacron[4313]: Normal exit (3 jobs run)
Feb 02 10:01:01 compute-0 CROND[255986]: (root) CMD (run-parts /etc/cron.hourly)
Feb 02 10:01:01 compute-0 CROND[255985]: (root) CMDEND (run-parts /etc/cron.hourly)

● dbus-broker.service - D-Bus System Message Bus
     Loaded: loaded (/usr/lib/systemd/system/dbus-broker.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
TriggeredBy: ● dbus.socket
       Docs: man:dbus-broker-launch(1)
   Main PID: 772 (dbus-broker-lau)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48560)
     Memory: 2.9M (peak: 3.7M)
        CPU: 6.833s
     CGroup: /system.slice/dbus-broker.service
             ├─772 /usr/bin/dbus-broker-launch --scope system --audit
             └─780 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit

Feb 02 09:32:43 compute-0 dbus-broker-launch[772]: Noticed file-system modification, trigger reload.
Feb 02 09:33:27 compute-0 dbus-broker-launch[780]: avc:  op=load_policy lsm=selinux seqno=7 res=1
Feb 02 09:33:36 compute-0 dbus-broker-launch[780]: avc:  op=load_policy lsm=selinux seqno=8 res=1
Feb 02 09:48:25 compute-0 dbus-broker-launch[780]: avc:  op=load_policy lsm=selinux seqno=9 res=1
Feb 02 09:52:04 compute-0 dbus-broker-launch[780]: avc:  op=load_policy lsm=selinux seqno=10 res=1
Feb 02 09:52:15 compute-0 dbus-broker-launch[780]: avc:  op=load_policy lsm=selinux seqno=11 res=1
Feb 02 09:52:54 compute-0 dbus-broker-launch[772]: Noticed file-system modification, trigger reload.
Feb 02 09:52:54 compute-0 dbus-broker-launch[780]: avc:  op=load_policy lsm=selinux seqno=12 res=1
Feb 02 09:52:54 compute-0 dbus-broker-launch[772]: Noticed file-system modification, trigger reload.
Feb 02 09:54:20 compute-0 dbus-broker-launch[780]: avc:  op=load_policy lsm=selinux seqno=13 res=1

○ dm-event.service - Device-mapper event daemon
     Loaded: loaded (/usr/lib/systemd/system/dm-event.service; static)
     Active: inactive (dead)
TriggeredBy: ● dm-event.socket
       Docs: man:dmeventd(8)

○ dnf-makecache.service - dnf makecache
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.service; static)
     Active: inactive (dead) since Mon 2026-02-02 09:32:14 UTC; 52min ago
TriggeredBy: ● dnf-makecache.timer
   Main PID: 34425 (code=exited, status=0/SUCCESS)
        CPU: 1.872s

Feb 02 09:32:12 compute-0 dnf[34425]: NFV SIG OpenvSwitch                              91 kB/s | 3.0 kB     00:00
Feb 02 09:32:12 compute-0 dnf[34425]: repo-setup-centos-appstream                     113 kB/s | 4.4 kB     00:00
Feb 02 09:32:13 compute-0 dnf[34425]: repo-setup-centos-baseos                        184 kB/s | 3.9 kB     00:00
Feb 02 09:32:13 compute-0 dnf[34425]: repo-setup-centos-highavailability              185 kB/s | 3.9 kB     00:00
Feb 02 09:32:13 compute-0 dnf[34425]: repo-setup-centos-powertools                    175 kB/s | 4.3 kB     00:00
Feb 02 09:32:13 compute-0 dnf[34425]: Extra Packages for Enterprise Linux 9 - x86_64   97 kB/s |  30 kB     00:00
Feb 02 09:32:13 compute-0 dnf[34425]: Metadata cache created.
Feb 02 09:32:14 compute-0 systemd[1]: dnf-makecache.service: Deactivated successfully.
Feb 02 09:32:14 compute-0 systemd[1]: Finished dnf makecache.
Feb 02 09:32:14 compute-0 systemd[1]: dnf-makecache.service: Consumed 1.872s CPU time.

○ dracut-cmdline.service - dracut cmdline hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-cmdline.service; static)
     Active: inactive (dead) since Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
   Duration: 1.758s
       Docs: man:dracut-cmdline.service(8)
             man:dracut.bootup(7)
   Main PID: 322 (code=exited, status=0/SUCCESS)
        CPU: 121ms

Feb 02 09:00:42 localhost systemd[1]: Starting dracut cmdline hook...
Feb 02 09:00:42 localhost dracut-cmdline[322]: dracut-9 dracut-057-102.git20250818.el9
Feb 02 09:00:42 localhost dracut-cmdline[322]: Using kernel command line parameters:    BOOT_IMAGE=(hd0,msdos1)/boot/vmlinuz-5.14.0-665.el9.x86_64 root=UUID=822f14ea-6e7e-41df-b0d8-fbe282d9ded8 ro console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=1G-2G:192M,2G-64G:256M,64G-:512M
Feb 02 09:00:42 localhost systemd[1]: Finished dracut cmdline hook.
Feb 02 09:00:44 localhost systemd[1]: dracut-cmdline.service: Deactivated successfully.
Feb 02 09:00:44 localhost systemd[1]: Stopped dracut cmdline hook.

○ dracut-initqueue.service - dracut initqueue hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-initqueue.service; static)
     Active: inactive (dead) since Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
   Duration: 918ms
       Docs: man:dracut-initqueue.service(8)
             man:dracut.bootup(7)
   Main PID: 498 (code=exited, status=0/SUCCESS)
        CPU: 30ms

Feb 02 09:00:42 localhost systemd[1]: Starting dracut initqueue hook...
Feb 02 09:00:43 localhost systemd[1]: Finished dracut initqueue hook.
Feb 02 09:00:44 localhost systemd[1]: dracut-initqueue.service: Deactivated successfully.
Feb 02 09:00:44 localhost systemd[1]: Stopped dracut initqueue hook.

○ dracut-mount.service - dracut mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-mount.service; static)
     Active: inactive (dead) since Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
   Duration: 181ms
       Docs: man:dracut-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 569 (code=exited, status=0/SUCCESS)
        CPU: 22ms

Feb 02 09:00:43 localhost systemd[1]: Starting dracut mount hook...
Feb 02 09:00:43 localhost systemd[1]: Finished dracut mount hook.
Feb 02 09:00:44 localhost systemd[1]: dracut-mount.service: Deactivated successfully.
Feb 02 09:00:44 localhost systemd[1]: Stopped dracut mount hook.

○ dracut-pre-mount.service - dracut pre-mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-mount.service; static)
     Active: inactive (dead) since Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
   Duration: 874ms
       Docs: man:dracut-pre-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 546 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Feb 02 09:00:43 localhost systemd[1]: Starting dracut pre-mount hook...
Feb 02 09:00:43 localhost systemd[1]: Finished dracut pre-mount hook.
Feb 02 09:00:44 localhost systemd[1]: dracut-pre-mount.service: Deactivated successfully.
Feb 02 09:00:44 localhost systemd[1]: Stopped dracut pre-mount hook.

○ dracut-pre-pivot.service - dracut pre-pivot and cleanup hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-pivot.service; static)
     Active: inactive (dead) since Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
   Duration: 42ms
       Docs: man:dracut-pre-pivot.service(8)
             man:dracut.bootup(7)
   Main PID: 575 (code=exited, status=0/SUCCESS)
        CPU: 90ms

Feb 02 09:00:43 localhost systemd[1]: Starting dracut pre-pivot and cleanup hook...
Feb 02 09:00:44 localhost systemd[1]: Finished dracut pre-pivot and cleanup hook.
Feb 02 09:00:44 localhost systemd[1]: dracut-pre-pivot.service: Deactivated successfully.
Feb 02 09:00:44 localhost systemd[1]: Stopped dracut pre-pivot and cleanup hook.

○ dracut-pre-trigger.service - dracut pre-trigger hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-trigger.service; static)
     Active: inactive (dead) since Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
   Duration: 1.414s
       Docs: man:dracut-pre-trigger.service(8)
             man:dracut.bootup(7)
   Main PID: 464 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Feb 02 09:00:42 localhost systemd[1]: Starting dracut pre-trigger hook...
Feb 02 09:00:42 localhost systemd[1]: Finished dracut pre-trigger hook.
Feb 02 09:00:44 localhost systemd[1]: dracut-pre-trigger.service: Deactivated successfully.
Feb 02 09:00:44 localhost systemd[1]: Stopped dracut pre-trigger hook.

○ dracut-pre-udev.service - dracut pre-udev hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-udev.service; static)
     Active: inactive (dead) since Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
   Duration: 1.498s
       Docs: man:dracut-pre-udev.service(8)
             man:dracut.bootup(7)
   Main PID: 411 (code=exited, status=0/SUCCESS)
        CPU: 255ms

Feb 02 09:00:42 localhost systemd[1]: Starting dracut pre-udev hook...
Feb 02 09:00:42 localhost rpc.statd[439]: Version 2.5.4 starting
Feb 02 09:00:42 localhost rpc.statd[439]: Initializing NSM state
Feb 02 09:00:42 localhost rpc.idmapd[444]: Setting log level to 0
Feb 02 09:00:42 localhost systemd[1]: Finished dracut pre-udev hook.
Feb 02 09:00:43 localhost rpc.idmapd[444]: exiting on signal 15
Feb 02 09:00:44 localhost systemd[1]: dracut-pre-udev.service: Deactivated successfully.
Feb 02 09:00:44 localhost systemd[1]: Stopped dracut pre-udev hook.

○ dracut-shutdown-onfailure.service - Service executing upon dracut-shutdown failure to perform cleanup
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown-onfailure.service; static)
     Active: inactive (dead)
       Docs: man:dracut-shutdown.service(8)

● dracut-shutdown.service - Restore /run/initramfs on shutdown
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown.service; static)
     Active: active (exited) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:dracut-shutdown.service(8)
   Main PID: 785 (code=exited, status=0/SUCCESS)
        CPU: 4ms

Feb 02 09:00:45 localhost systemd[1]: Starting Restore /run/initramfs on shutdown...
Feb 02 09:00:46 localhost systemd[1]: Finished Restore /run/initramfs on shutdown.

○ e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36-72a4a8d8f632219c.service - /usr/bin/podman healthcheck run e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36
     Loaded: loaded (/run/systemd/transient/e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36-72a4a8d8f632219c.service; transient)
  Transient: yes
     Active: inactive (dead) since Mon 2026-02-02 10:24:53 UTC; 13s ago
   Duration: 117ms
TriggeredBy: ● e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36-72a4a8d8f632219c.timer
    Process: 294442 ExecStart=/usr/bin/podman healthcheck run e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36 (code=exited, status=0/SUCCESS)
   Main PID: 294442 (code=exited, status=0/SUCCESS)
        CPU: 96ms

Feb 02 10:24:53 compute-0 podman[294442]: 2026-02-02 10:24:53.375128539 +0000 UTC m=+0.089392181 container health_status e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller@sha256:099d88ae13fa2b3409da5310cdcba7fa01d2c87a8bc98296299a57054b9a075e, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_controller, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': 'db4758ee7523fe447444c4bd2b867b543b1eee4e3bbcf6676cd1b27bf6147d86-65dc6d8b666d074a9e865f271939acafedbf905c57c15ae47c3f2766afb95121-65dc6d8b666d074a9e865f271939acafedbf905c57c15ae47c3f2766afb95121-65dc6d8b666d074a9e865f271939acafedbf905c57c15ae47c3f2766afb95121'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller@sha256:099d88ae13fa2b3409da5310cdcba7fa01d2c87a8bc98296299a57054b9a075e', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.build-date=20260127, tcib_managed=true, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator 
team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4)

● edpm-container-shutdown.service - EDPM Container Shutdown
     Loaded: loaded (/etc/systemd/system/edpm-container-shutdown.service; enabled; preset: enabled)
     Active: active (exited) since Mon 2026-02-02 09:35:34 UTC; 49min ago
       Docs: https://github.com/openstack-k8s-operators/docs
   Main PID: 61584 (code=exited, status=0/SUCCESS)
        CPU: 2ms

Feb 02 09:35:34 compute-0 systemd[1]: Starting EDPM Container Shutdown...
Feb 02 09:35:34 compute-0 systemd[1]: Finished EDPM Container Shutdown.

○ edpm_libvirt_guests.service - Suspend libvirt Guests in edpm
     Loaded: loaded (/etc/systemd/system/edpm_libvirt_guests.service; enabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:libvirtd(8)
             https://libvirt.org

● edpm_nova_compute.service - nova_compute container
     Loaded: loaded (/etc/systemd/system/edpm_nova_compute.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 09:57:59 UTC; 27min ago
    Process: 252657 ExecStart=/usr/bin/podman start nova_compute (code=exited, status=0/SUCCESS)
   Main PID: 252672 (conmon)
         IO: 0B read, 97.5K written
      Tasks: 1 (limit: 48560)
     Memory: 684.0K (peak: 16.9M)
        CPU: 588ms
     CGroup: /system.slice/edpm_nova_compute.service
             └─252672 /usr/bin/conmon --api-version 1 -c 46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3 -u 46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3/userdata -p /run/containers/storage/overlay-containers/46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3

Feb 02 10:24:43 compute-0 nova_compute[252672]: 2026-02-02 10:24:43.114 252676 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 10:24:43 compute-0 nova_compute[252672]: 2026-02-02 10:24:43.718 252676 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 10:24:48 compute-0 nova_compute[252672]: 2026-02-02 10:24:48.169 252676 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 10:24:48 compute-0 nova_compute[252672]: 2026-02-02 10:24:48.719 252676 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 10:24:53 compute-0 nova_compute[252672]: 2026-02-02 10:24:53.173 252676 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 10:24:53 compute-0 nova_compute[252672]: 2026-02-02 10:24:53.721 252676 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 10:24:58 compute-0 nova_compute[252672]: 2026-02-02 10:24:58.213 252676 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 10:24:58 compute-0 nova_compute[252672]: 2026-02-02 10:24:58.724 252676 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 10:25:03 compute-0 nova_compute[252672]: 2026-02-02 10:25:03.217 252676 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 10:25:03 compute-0 nova_compute[252672]: 2026-02-02 10:25:03.726 252676 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m

● edpm_ovn_controller.service - ovn_controller container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_controller.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 09:49:28 UTC; 35min ago
   Main PID: 154631 (conmon)
         IO: 0B read, 140.5K written
      Tasks: 1 (limit: 48560)
     Memory: 692.0K (peak: 20.8M)
        CPU: 333ms
     CGroup: /system.slice/edpm_ovn_controller.service
             └─154631 /usr/bin/conmon --api-version 1 -c e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36 -u e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36/userdata -p /run/containers/storage/overlay-containers/e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36

Feb 02 10:12:48 compute-0 ovn_controller[154631]: 2026-02-02T10:12:48Z|00084|binding|INFO|Releasing lport ce0ea125-e6c2-41cd-b9ad-71cce6387108 from this chassis (sb_readonly=0)
Feb 02 10:12:56 compute-0 ovn_controller[154631]: 2026-02-02T10:12:56Z|00012|pinctrl(ovn_pinctrl0)|INFO|DHCPOFFER fa:16:3e:b3:52:4f 10.100.0.4
Feb 02 10:12:56 compute-0 ovn_controller[154631]: 2026-02-02T10:12:56Z|00013|pinctrl(ovn_pinctrl0)|INFO|DHCPACK fa:16:3e:b3:52:4f 10.100.0.4
Feb 02 10:13:01 compute-0 ovn_controller[154631]: 2026-02-02T10:13:01Z|00085|binding|INFO|Releasing lport ce0ea125-e6c2-41cd-b9ad-71cce6387108 from this chassis (sb_readonly=0)
Feb 02 10:13:01 compute-0 ovn_controller[154631]: 2026-02-02T10:13:01Z|00086|binding|INFO|Releasing lport ce0ea125-e6c2-41cd-b9ad-71cce6387108 from this chassis (sb_readonly=0)
Feb 02 10:13:03 compute-0 ovn_controller[154631]: 2026-02-02T10:13:03Z|00087|binding|INFO|Releasing lport ce0ea125-e6c2-41cd-b9ad-71cce6387108 from this chassis (sb_readonly=0)
Feb 02 10:13:05 compute-0 ovn_controller[154631]: 2026-02-02T10:13:05Z|00088|binding|INFO|Releasing lport 792f51ec-051b-472a-bfc0-65b93275a823 from this chassis (sb_readonly=0)
Feb 02 10:13:05 compute-0 ovn_controller[154631]: 2026-02-02T10:13:05Z|00089|binding|INFO|Setting lport 792f51ec-051b-472a-bfc0-65b93275a823 down in Southbound
Feb 02 10:13:05 compute-0 ovn_controller[154631]: 2026-02-02T10:13:05Z|00090|binding|INFO|Removing iface tap792f51ec-05 ovn-installed in OVS
Feb 02 10:13:43 compute-0 ovn_controller[154631]: 2026-02-02T10:13:43Z|00091|memory_trim|INFO|Detected inactivity (last active 30006 ms ago): trimming memory

● edpm_ovn_metadata_agent.service - ovn_metadata_agent container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_metadata_agent.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 09:50:43 UTC; 34min ago
   Main PID: 165359 (conmon)
         IO: 0B read, 111.0K written
      Tasks: 1 (limit: 48560)
     Memory: 712.0K (peak: 17.9M)
        CPU: 355ms
     CGroup: /system.slice/edpm_ovn_metadata_agent.service
              └─165359 /usr/bin/conmon --api-version 1 -c 29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8 -u 29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8/userdata -p /run/containers/storage/overlay-containers/29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg 29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8
Unit fcoe.service could not be found.

Feb 02 10:21:45 compute-0 ovn_metadata_agent[165359]: 2026-02-02 10:21:45.397 165364 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Feb 02 10:22:45 compute-0 ovn_metadata_agent[165359]: 2026-02-02 10:22:45.397 165364 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Feb 02 10:22:45 compute-0 ovn_metadata_agent[165359]: 2026-02-02 10:22:45.398 165364 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Feb 02 10:22:45 compute-0 ovn_metadata_agent[165359]: 2026-02-02 10:22:45.398 165364 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Feb 02 10:23:45 compute-0 ovn_metadata_agent[165359]: 2026-02-02 10:23:45.399 165364 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Feb 02 10:23:45 compute-0 ovn_metadata_agent[165359]: 2026-02-02 10:23:45.399 165364 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Feb 02 10:23:45 compute-0 ovn_metadata_agent[165359]: 2026-02-02 10:23:45.399 165364 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Feb 02 10:24:45 compute-0 ovn_metadata_agent[165359]: 2026-02-02 10:24:45.400 165364 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Feb 02 10:24:45 compute-0 ovn_metadata_agent[165359]: 2026-02-02 10:24:45.401 165364 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Feb 02 10:24:45 compute-0 ovn_metadata_agent[165359]: 2026-02-02 10:24:45.401 165364 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m

○ emergency.service - Emergency Shell
     Loaded: loaded (/usr/lib/systemd/system/emergency.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

● getty@tty1.service - Getty on tty1
     Loaded: loaded (/usr/lib/systemd/system/getty@.service; enabled; preset: enabled)
Unit hv_kvp_daemon.service could not be found.
     Active: active (running) since Mon 2026-02-02 09:00:50 UTC; 1h 24min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1010 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 212.0K (peak: 436.0K)
        CPU: 7ms
     CGroup: /system.slice/system-getty.slice/getty@tty1.service
             └─1010 /sbin/agetty -o "-p -- \\u" --noclear - linux

Feb 02 09:00:50 np0005604790.novalocal systemd[1]: Started Getty on tty1.

● gssproxy.service - GSSAPI Proxy Daemon
     Loaded: loaded (/usr/lib/systemd/system/gssproxy.service; disabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 09:00:47 UTC; 1h 24min ago
   Main PID: 879 (gssproxy)
         IO: 0B read, 0B written
      Tasks: 6 (limit: 48560)
     Memory: 1.8M (peak: 3.8M)
        CPU: 19ms
     CGroup: /system.slice/gssproxy.service
             └─879 /usr/sbin/gssproxy -D

Feb 02 09:00:47 np0005604790.novalocal systemd[1]: Starting GSSAPI Proxy Daemon...
Feb 02 09:00:47 np0005604790.novalocal systemd[1]: Started GSSAPI Proxy Daemon.

○ import-state.service - Import network configuration from initramfs
     Loaded: loaded (/usr/lib/systemd/system/import-state.service; enabled; preset: enabled)
     Active: inactive (dead)

○ initrd-cleanup.service - Cleaning Up and Shutting Down Daemons
     Loaded: loaded (/usr/lib/systemd/system/initrd-cleanup.service; static)
     Active: inactive (dead) since Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
   Main PID: 618 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Feb 02 09:00:44 localhost systemd[1]: Starting Cleaning Up and Shutting Down Daemons...
Feb 02 09:00:44 localhost systemd[1]: initrd-cleanup.service: Deactivated successfully.
Feb 02 09:00:44 localhost systemd[1]: Finished Cleaning Up and Shutting Down Daemons.

○ initrd-parse-etc.service - Mountpoints Configured in the Real Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-parse-etc.service; static)
     Active: inactive (dead) since Mon 2026-02-02 09:00:43 UTC; 1h 24min ago
   Main PID: 568 (code=exited, status=0/SUCCESS)
        CPU: 11ms

Feb 02 09:00:43 localhost systemd[1]: Starting Mountpoints Configured in the Real Root...
Feb 02 09:00:43 localhost systemd[1]: initrd-parse-etc.service: Deactivated successfully.
Feb 02 09:00:43 localhost systemd[1]: Finished Mountpoints Configured in the Real Root.

○ initrd-switch-root.service - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.service; static)
     Active: inactive (dead) since Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
   Main PID: 621 (code=exited, status=0/SUCCESS)
        CPU: 6ms

Feb 02 09:00:44 localhost systemd[1]: Starting Switch Root...

○ initrd-udevadm-cleanup-db.service - Cleanup udev Database
     Loaded: loaded (/usr/lib/systemd/system/initrd-udevadm-cleanup-db.service; static)
     Active: inactive (dead) since Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
   Main PID: 620 (code=exited, status=0/SUCCESS)
        CPU: 18ms

Feb 02 09:00:44 localhost systemd[1]: Starting Cleanup udev Database...
Feb 02 09:00:44 localhost systemd[1]: initrd-udevadm-cleanup-db.service: Deactivated successfully.
Feb 02 09:00:44 localhost systemd[1]: Finished Cleanup udev Database.

○ ip6tables.service - IPv6 firewall with ip6tables
     Loaded: loaded (/usr/lib/systemd/system/ip6tables.service; disabled; preset: disabled)
     Active: inactive (dead)

○ iptables.service - IPv4 firewall with iptables
     Loaded: loaded (/usr/lib/systemd/system/iptables.service; disabled; preset: disabled)
     Active: inactive (dead) since Mon 2026-02-02 09:35:43 UTC; 49min ago
   Duration: 34min 57.243s
   Main PID: 787 (code=exited, status=0/SUCCESS)
        CPU: 71ms

Feb 02 09:00:45 localhost systemd[1]: Starting IPv4 firewall with iptables...
Feb 02 09:00:46 localhost iptables.init[787]: iptables: Applying firewall rules: [  OK  ]
Feb 02 09:00:46 localhost systemd[1]: Finished IPv4 firewall with iptables.
Feb 02 09:35:43 compute-0 systemd[1]: Stopping IPv4 firewall with iptables...
Feb 02 09:35:43 compute-0 iptables.init[62833]: iptables: Setting chains to policy ACCEPT: raw mangle filter nat [  OK  ]
Feb 02 09:35:43 compute-0 iptables.init[62833]: iptables: Flushing firewall rules: [  OK  ]
Feb 02 09:35:43 compute-0 systemd[1]: iptables.service: Deactivated successfully.
Feb 02 09:35:43 compute-0 systemd[1]: Stopped IPv4 firewall with iptables.

● irqbalance.service - irqbalance daemon
     Loaded: loaded (/usr/lib/systemd/system/irqbalance.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:irqbalance(1)
             https://github.com/Irqbalance/irqbalance
   Main PID: 788 (irqbalance)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48560)
     Memory: 1.1M (peak: 1.6M)
        CPU: 380ms
     CGroup: /system.slice/irqbalance.service
             └─788 /usr/sbin/irqbalance

Feb 02 09:00:56 np0005604790.novalocal irqbalance[788]: Cannot change IRQ 32 affinity: Operation not permitted
Feb 02 09:00:56 np0005604790.novalocal irqbalance[788]: IRQ 32 affinity is now unmanaged
Feb 02 09:00:56 np0005604790.novalocal irqbalance[788]: Cannot change IRQ 30 affinity: Operation not permitted
Feb 02 09:00:56 np0005604790.novalocal irqbalance[788]: IRQ 30 affinity is now unmanaged
Feb 02 09:00:56 np0005604790.novalocal irqbalance[788]: Cannot change IRQ 29 affinity: Operation not permitted
Feb 02 09:00:56 np0005604790.novalocal irqbalance[788]: IRQ 29 affinity is now unmanaged
Feb 02 09:01:06 np0005604790.novalocal irqbalance[788]: Cannot change IRQ 26 affinity: Operation not permitted
Feb 02 09:01:06 np0005604790.novalocal irqbalance[788]: IRQ 26 affinity is now unmanaged
Feb 02 09:14:26 np0005604790.novalocal irqbalance[788]: Cannot change IRQ 27 affinity: Operation not permitted
Feb 02 09:14:26 np0005604790.novalocal irqbalance[788]: IRQ 27 affinity is now unmanaged

○ iscsi-init.service - One time configuration for iscsi.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-init.service; disabled; preset: disabled)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 09:56:17 UTC; 28min ago

Feb 02 09:55:39 compute-0 systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).
Feb 02 09:56:17 compute-0 systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).

○ iscsi-onboot.service - Special handling of early boot iSCSI sessions
     Loaded: loaded (/usr/lib/systemd/system/iscsi-onboot.service; enabled; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsi-shutdown.service - Logout off all iSCSI sessions on shutdown
     Loaded: loaded (/usr/lib/systemd/system/iscsi-shutdown.service; static)
     Active: active (exited) since Mon 2026-02-02 09:55:39 UTC; 29min ago
       Docs: man:iscsid(8)
             man:iscsiadm(8)
   Main PID: 229887 (code=exited, status=0/SUCCESS)
        CPU: 4ms

Feb 02 09:55:39 compute-0 systemd[1]: Starting Logout off all iSCSI sessions on shutdown...
Feb 02 09:55:39 compute-0 systemd[1]: Finished Logout off all iSCSI sessions on shutdown.

○ iscsi-starter.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-starter.service; enabled; preset: disabled)
     Active: inactive (dead)

○ iscsi.service - Login and scanning of iSCSI devices
     Loaded: loaded (/usr/lib/systemd/system/iscsi.service; indirect; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsid.service - Open-iSCSI
     Loaded: loaded (/usr/lib/systemd/system/iscsid.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 09:56:17 UTC; 28min ago
TriggeredBy: ● iscsid.socket
       Docs: man:iscsid(8)
             man:iscsiuio(8)
              man:iscsiadm(8)
Unit lvm2-activation-early.service could not be found.
   Main PID: 236646 (iscsid)
     Status: "Ready to process requests"
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 1.9M (peak: 1.9M)
        CPU: 12ms
     CGroup: /system.slice/iscsid.service
             └─236646 /usr/sbin/iscsid -f

Feb 02 09:56:17 compute-0 systemd[1]: Starting Open-iSCSI...
Feb 02 09:56:17 compute-0 systemd[1]: Started Open-iSCSI.

○ iscsiuio.service - iSCSI UserSpace I/O driver
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ iscsiuio.socket
       Docs: man:iscsiuio(8)

● kdump.service - Crash recovery kernel arming
     Loaded: loaded (/usr/lib/systemd/system/kdump.service; enabled; preset: enabled)
     Active: active (exited) since Mon 2026-02-02 09:01:00 UTC; 1h 24min ago
   Main PID: 1008 (code=exited, status=0/SUCCESS)
        CPU: 14.681s

Feb 02 09:00:59 np0005604790.novalocal dracut[1267]: Linked:         0 files
Feb 02 09:00:59 np0005604790.novalocal dracut[1267]: Compared:       0 xattrs
Feb 02 09:00:59 np0005604790.novalocal dracut[1267]: Compared:       0 files
Feb 02 09:00:59 np0005604790.novalocal dracut[1267]: Saved:          0 B
Feb 02 09:00:59 np0005604790.novalocal dracut[1267]: Duration:       0.000543 seconds
Feb 02 09:00:59 np0005604790.novalocal dracut[1267]: *** Hardlinking files done ***
Feb 02 09:00:59 np0005604790.novalocal dracut[1267]: *** Creating initramfs image file '/boot/initramfs-5.14.0-665.el9.x86_64kdump.img' done ***
Feb 02 09:01:00 np0005604790.novalocal kdumpctl[1018]: kdump: kexec: loaded kdump kernel
Feb 02 09:01:00 np0005604790.novalocal kdumpctl[1018]: kdump: Starting kdump: [OK]
Feb 02 09:01:00 np0005604790.novalocal systemd[1]: Finished Crash recovery kernel arming.

● kmod-static-nodes.service - Create List of Static Device Nodes
     Loaded: loaded (/usr/lib/systemd/system/kmod-static-nodes.service; static)
     Active: active (exited) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
   Main PID: 671 (code=exited, status=0/SUCCESS)
        CPU: 4ms

● ldconfig.service - Rebuild Dynamic Linker Cache
     Loaded: loaded (/usr/lib/systemd/system/ldconfig.service; static)
     Active: active (exited) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:ldconfig(8)
   Main PID: 694 (code=exited, status=0/SUCCESS)
        CPU: 42ms

Feb 02 09:00:45 localhost systemd[1]: Starting Rebuild Dynamic Linker Cache...
Feb 02 09:00:45 localhost systemd[1]: Finished Rebuild Dynamic Linker Cache.

○ libvirtd.service
     Loaded: masked (Reason: Unit libvirtd.service is masked.)
     Active: inactive (dead)
TriggeredBy: ○ libvirtd.socket
             ○ libvirtd-admin.socket
             ○ libvirtd-ro.socket

○ loadmodules.service - Load legacy module configuration
     Loaded: loaded (/usr/lib/systemd/system/loadmodules.service; enabled; preset: enabled)
     Active: inactive (dead)

○ logrotate.service - Rotate log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.service; static)
     Active: inactive (dead)
TriggeredBy: ● logrotate.timer
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

○ lvm2-lvmpolld.service - LVM2 poll daemon
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.service; static)
     Active: inactive (dead)
TriggeredBy: ● lvm2-lvmpolld.socket
       Docs: man:lvmpolld(8)

● lvm2-monitor.service - Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling
     Loaded: loaded (/usr/lib/systemd/system/lvm2-monitor.service; enabled; preset: enabled)
     Active: active (exited) since Mon 2026-02-02 09:31:15 UTC; 53min ago
       Docs: man:dmeventd(8)
             man:lvcreate(8)
             man:lvchange(8)
             man:vgchange(8)
   Main PID: 34032 (code=exited, status=0/SUCCESS)
        CPU: 19ms

Feb 02 09:31:15 compute-0 systemd[1]: Starting Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling...
Feb 02 09:31:15 compute-0 systemd[1]: Finished Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling.

○ mdmonitor.service - Software RAID monitoring and management
     Loaded: loaded (/usr/lib/systemd/system/mdmonitor.service; enabled; preset: enabled)
     Active: inactive (dead)

○ microcode.service - Load CPU microcode update
     Loaded: loaded (/usr/lib/systemd/system/microcode.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 09:00:45 UTC; 1h 24min ago

Feb 02 09:00:45 localhost systemd[1]: Load CPU microcode update was skipped because of an unmet condition check (ConditionPathExists=/sys/devices/system/cpu/microcode/reload).

○ modprobe@configfs.service - Load Kernel Module configfs
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:modprobe(8)
   Main PID: 742 (code=exited, status=0/SUCCESS)
        CPU: 11ms

Feb 02 09:00:45 localhost systemd[1]: Starting Load Kernel Module configfs...
Feb 02 09:00:45 localhost systemd[1]: modprobe@configfs.service: Deactivated successfully.
Feb 02 09:00:45 localhost systemd[1]: Finished Load Kernel Module configfs.

○ modprobe@drm.service - Load Kernel Module drm
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:modprobe(8)
   Main PID: 673 (code=exited, status=0/SUCCESS)
        CPU: 71ms

Feb 02 09:00:45 localhost systemd[1]: modprobe@drm.service: Deactivated successfully.
Feb 02 09:00:45 localhost systemd[1]: Finished Load Kernel Module drm.

○ modprobe@efi_pstore.service - Load Kernel Module efi_pstore
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:modprobe(8)
   Main PID: 674 (code=exited, status=0/SUCCESS)
        CPU: 4ms

Feb 02 09:00:45 localhost systemd[1]: modprobe@efi_pstore.service: Deactivated successfully.
Feb 02 09:00:45 localhost systemd[1]: Finished Load Kernel Module efi_pstore.

○ modprobe@fuse.service - Load Kernel Module fuse
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:modprobe(8)
   Main PID: 675 (code=exited, status=0/SUCCESS)
        CPU: 59ms

Feb 02 09:00:45 localhost systemd[1]: modprobe@fuse.service: Deactivated successfully.
Feb 02 09:00:45 localhost systemd[1]: Finished Load Kernel Module fuse.

● multipathd.service - Device-Mapper Multipath Device Controller
     Loaded: loaded (/usr/lib/systemd/system/multipathd.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 09:56:18 UTC; 28min ago
TriggeredBy: ● multipathd.socket
   Main PID: 236888 (multipathd)
     Status: "up"
         IO: 0B read, 0B written
      Tasks: 7
     Memory: 18.4M (peak: 18.7M)
        CPU: 293ms
     CGroup: /system.slice/multipathd.service
             └─236888 /sbin/multipathd -d -s

Feb 02 09:56:18 compute-0 systemd[1]: Starting Device-Mapper Multipath Device Controller...
Feb 02 09:56:18 compute-0 multipathd[236888]: --------start up--------
Feb 02 09:56:18 compute-0 multipathd[236888]: read /etc/multipath.conf
Feb 02 09:56:18 compute-0 multipathd[236888]: path checkers start up
Feb 02 09:56:18 compute-0 systemd[1]: Started Device-Mapper Multipath Device Controller.

○ netns-placeholder.service - Create netns directory
     Loaded: loaded (/etc/systemd/system/netns-placeholder.service; enabled; preset: enabled)
     Active: inactive (dead) since Mon 2026-02-02 09:50:13 UTC; 34min ago
   Main PID: 161934 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Feb 02 09:50:13 compute-0 systemd[1]: Starting Create netns directory...
Feb 02 09:50:13 compute-0 systemd[1]: netns-placeholder.service: Deactivated successfully.
Feb 02 09:50:13 compute-0 systemd[1]: Finished Create netns directory.

○ network.service - LSB: Bring up/down networking
     Loaded: loaded (/etc/rc.d/init.d/network; generated)
     Active: inactive (dead)
       Docs: man:systemd-sysv-generator(8)

● NetworkManager-wait-online.service - Network Manager Wait Online
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager-wait-online.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 09:33:44 UTC; 51min ago
       Docs: man:NetworkManager-wait-online.service(8)
   Main PID: 49043 (code=exited, status=0/SUCCESS)
        CPU: 19ms

Feb 02 09:33:44 compute-0 systemd[1]: Starting Network Manager Wait Online...
Feb 02 09:33:44 compute-0 systemd[1]: Finished Network Manager Wait Online.

● NetworkManager.service - Network Manager
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager.service; enabled; preset: enabled)
    Drop-In: /usr/lib/systemd/system/NetworkManager.service.d
             └─NetworkManager-ovs.conf
     Active: active (running) since Mon 2026-02-02 09:33:44 UTC; 51min ago
       Docs: man:NetworkManager(8)
   Main PID: 49024 (NetworkManager)
         IO: 104.0K read, 276.5K written
      Tasks: 3 (limit: 48560)
     Memory: 5.5M (peak: 6.4M)
        CPU: 29.897s
     CGroup: /system.slice/NetworkManager.service
             └─49024 /usr/sbin/NetworkManager --no-daemon

Feb 02 10:12:42 compute-0 NetworkManager[49024]: <info>  [1770027162.9341] device (tap792f51ec-05): state change: unmanaged -> unavailable (reason 'connection-assumed', managed-type: 'external')
Feb 02 10:12:42 compute-0 NetworkManager[49024]: <info>  [1770027162.9350] device (tap792f51ec-05): state change: unavailable -> disconnected (reason 'none', managed-type: 'external')
Feb 02 10:12:43 compute-0 NetworkManager[49024]: <info>  [1770027163.0019] manager: (tap31e2c386-20): new Veth device (/org/freedesktop/NetworkManager/Devices/53)
Feb 02 10:12:43 compute-0 NetworkManager[49024]: <info>  [1770027163.0520] device (tap31e2c386-20): carrier: link connected
Feb 02 10:12:43 compute-0 NetworkManager[49024]: <info>  [1770027163.2243] manager: (tap31e2c386-20): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/54)
Feb 02 10:12:48 compute-0 NetworkManager[49024]: <info>  [1770027168.0102] manager: (patch-br-int-to-provnet-3738ab71-03c6-44c1-bc4f-10cf3e96782e): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/55)
Feb 02 10:12:48 compute-0 NetworkManager[49024]: <info>  [1770027168.0116] manager: (patch-provnet-3738ab71-03c6-44c1-bc4f-10cf3e96782e-to-br-int): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/56)
Feb 02 10:13:03 compute-0 NetworkManager[49024]: <info>  [1770027183.7132] manager: (patch-provnet-3738ab71-03c6-44c1-bc4f-10cf3e96782e-to-br-int): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/57)
Feb 02 10:13:03 compute-0 NetworkManager[49024]: <info>  [1770027183.7152] manager: (patch-br-int-to-provnet-3738ab71-03c6-44c1-bc4f-10cf3e96782e): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/58)
Feb 02 10:13:05 compute-0 NetworkManager[49024]: <info>  [1770027185.1201] device (tap792f51ec-05): state change: disconnected -> unmanaged (reason 'unmanaged', managed-type: 'removed')

○ nfs-idmapd.service - NFSv4 ID-name mapping service
     Loaded: loaded (/usr/lib/systemd/system/nfs-idmapd.service; static)
     Active: inactive (dead)
       Docs: man:idmapd(8)

○ nfs-mountd.service - NFS Mount Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfs-mountd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.mountd(8)

○ nfs-server.service - NFS server and services
     Loaded: loaded (/usr/lib/systemd/system/nfs-server.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:rpc.nfsd(8)
             man:exportfs(8)

○ nfs-utils.service - NFS server and client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-utils.service; static)
     Active: inactive (dead)

○ nfsdcld.service - NFSv4 Client Tracking Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfsdcld.service; static)
     Active: inactive (dead)
       Docs: man:nfsdcld(8)
Unit ntpd.service could not be found.
Unit ntpdate.service could not be found.

● nftables.service - Netfilter Tables
     Loaded: loaded (/usr/lib/systemd/system/nftables.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 09:35:45 UTC; 49min ago
       Docs: man:nft(8)
   Main PID: 63222 (code=exited, status=0/SUCCESS)
        CPU: 20ms

Feb 02 09:35:45 compute-0 systemd[1]: Starting Netfilter Tables...
Feb 02 09:35:45 compute-0 systemd[1]: Finished Netfilter Tables.

● nis-domainname.service - Read and set NIS domainname from /etc/sysconfig/network
     Loaded: loaded (/usr/lib/systemd/system/nis-domainname.service; enabled; preset: enabled)
     Active: active (exited) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
   Main PID: 676 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Feb 02 09:00:45 localhost systemd[1]: Finished Read and set NIS domainname from /etc/sysconfig/network.

○ nvmefc-boot-connections.service - Auto-connect to subsystems on FC-NVME devices found during boot
     Loaded: loaded (/usr/lib/systemd/system/nvmefc-boot-connections.service; enabled; preset: enabled)
     Active: inactive (dead)

● openvswitch.service - Open vSwitch
     Loaded: loaded (/usr/lib/systemd/system/openvswitch.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 09:33:31 UTC; 51min ago
   Main PID: 47340 (code=exited, status=0/SUCCESS)
        CPU: 1ms

Feb 02 09:33:31 compute-0 systemd[1]: Starting Open vSwitch...
Feb 02 09:33:31 compute-0 systemd[1]: Finished Open vSwitch.

● ovs-delete-transient-ports.service - Open vSwitch Delete Transient Ports
     Loaded: loaded (/usr/lib/systemd/system/ovs-delete-transient-ports.service; static)
     Active: active (exited) since Mon 2026-02-02 09:33:31 UTC; 51min ago
   Main PID: 47278 (code=exited, status=0/SUCCESS)
        CPU: 19ms

Feb 02 09:33:31 compute-0 systemd[1]: Starting Open vSwitch Delete Transient Ports...
Feb 02 09:33:31 compute-0 systemd[1]: Finished Open vSwitch Delete Transient Ports.

● ovs-vswitchd.service - Open vSwitch Forwarding Unit
     Loaded: loaded (/usr/lib/systemd/system/ovs-vswitchd.service; static)
     Active: active (running) since Mon 2026-02-02 09:33:31 UTC; 51min ago
   Main PID: 47331 (ovs-vswitchd)
         IO: 3.4M read, 152.0K written
      Tasks: 13 (limit: 48560)
     Memory: 244.2M (peak: 249.0M)
        CPU: 9.952s
     CGroup: /system.slice/ovs-vswitchd.service
             └─47331 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach

Feb 02 09:33:31 compute-0 systemd[1]: Starting Open vSwitch Forwarding Unit...
Feb 02 09:33:31 compute-0 ovs-ctl[47321]: Inserting openvswitch module [  OK  ]
Feb 02 09:33:31 compute-0 ovs-ctl[47290]: Starting ovs-vswitchd [  OK  ]
Feb 02 09:33:31 compute-0 ovs-vsctl[47338]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0
Feb 02 09:33:31 compute-0 ovs-ctl[47290]: Enabling remote OVSDB managers [  OK  ]
Feb 02 09:33:31 compute-0 systemd[1]: Started Open vSwitch Forwarding Unit.

● ovsdb-server.service - Open vSwitch Database Unit
     Loaded: loaded (/usr/lib/systemd/system/ovsdb-server.service; static)
     Active: active (running) since Mon 2026-02-02 09:33:31 UTC; 51min ago
   Main PID: 47250 (ovsdb-server)
         IO: 1.2M read, 286.5K written
      Tasks: 1 (limit: 48560)
     Memory: 4.8M (peak: 38.7M)
        CPU: 23.337s
     CGroup: /system.slice/ovsdb-server.service
             └─47250 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach

Feb 02 09:33:30 compute-0 chown[47197]: /usr/bin/chown: cannot access '/run/openvswitch': No such file or directory
Unit pacemaker.service could not be found.
Unit plymouth-quit-wait.service could not be found.
Unit plymouth-start.service could not be found.
Unit power-profiles-daemon.service could not be found.
Feb 02 09:33:30 compute-0 ovs-ctl[47202]: /etc/openvswitch/conf.db does not exist ... (warning).
Feb 02 09:33:30 compute-0 ovs-ctl[47202]: Creating empty database /etc/openvswitch/conf.db [  OK  ]
Feb 02 09:33:30 compute-0 ovs-ctl[47202]: Starting ovsdb-server [  OK  ]
Feb 02 09:33:30 compute-0 ovs-vsctl[47251]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait -- init -- set Open_vSwitch . db-version=8.5.1
Feb 02 09:33:31 compute-0 ovs-vsctl[47271]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait set Open_vSwitch . ovs-version=3.3.5-115.el9s "external-ids:system-id=\"031ca08d-19ea-44b4-b1bd-33ab088eb6a6\"" "external-ids:rundir=\"/var/run/openvswitch\"" "system-type=\"centos\"" "system-version=\"9\""
Feb 02 09:33:31 compute-0 ovs-ctl[47202]: Configuring Open vSwitch system IDs [  OK  ]
Feb 02 09:33:31 compute-0 ovs-ctl[47202]: Enabling remote OVSDB managers [  OK  ]
Feb 02 09:33:31 compute-0 ovs-vsctl[47277]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0
Feb 02 09:33:31 compute-0 systemd[1]: Started Open vSwitch Database Unit.

● polkit.service - Authorization Manager
     Loaded: loaded (/usr/lib/systemd/system/polkit.service; static)
     Active: active (running) since Mon 2026-02-02 09:32:48 UTC; 52min ago
       Docs: man:polkit(8)
   Main PID: 43515 (polkitd)
         IO: 19.0M read, 0B written
      Tasks: 12 (limit: 48560)
     Memory: 25.0M (peak: 26.8M)
        CPU: 2.029s
     CGroup: /system.slice/polkit.service
             └─43515 /usr/lib/polkit-1/polkitd --no-debug

Feb 02 09:52:58 compute-0 polkitd[43515]: Collecting garbage unconditionally...
Feb 02 09:52:58 compute-0 polkitd[43515]: Loading rules from directory /etc/polkit-1/rules.d
Feb 02 09:52:58 compute-0 polkitd[43515]: Loading rules from directory /usr/share/polkit-1/rules.d
Feb 02 09:52:58 compute-0 polkitd[43515]: Finished loading, compiling and executing 3 rules
Feb 02 09:54:38 compute-0 polkitd[43515]: Registered Authentication Agent for unix-process:220354:323755 (system bus name :1.2835 [pkttyagent --process 220354 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Feb 02 09:54:38 compute-0 polkitd[43515]: Unregistered Authentication Agent for unix-process:220354:323755 (system bus name :1.2835, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Feb 02 09:54:38 compute-0 polkitd[43515]: Registered Authentication Agent for unix-process:220353:323754 (system bus name :1.2836 [pkttyagent --process 220353 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Feb 02 09:54:38 compute-0 polkitd[43515]: Unregistered Authentication Agent for unix-process:220353:323754 (system bus name :1.2836, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Feb 02 09:54:40 compute-0 polkitd[43515]: Registered Authentication Agent for unix-process:220822:324017 (system bus name :1.2839 [pkttyagent --process 220822 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Feb 02 09:54:40 compute-0 polkitd[43515]: Unregistered Authentication Agent for unix-process:220822:324017 (system bus name :1.2839, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)

○ raid-check.service - RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.service; static)
     Active: inactive (dead)
TriggeredBy: ○ raid-check.timer

○ rbdmap.service - Map RBD devices
     Loaded: loaded (/usr/lib/systemd/system/rbdmap.service; disabled; preset: disabled)
     Active: inactive (dead)

○ rc-local.service - /etc/rc.d/rc.local Compatibility
     Loaded: loaded (/usr/lib/systemd/system/rc-local.service; static)
     Active: inactive (dead)
       Docs: man:systemd-rc-local-generator(8)

○ rescue.service - Rescue Shell
Unit rpc-svcgssd.service could not be found.
     Loaded: loaded (/usr/lib/systemd/system/rescue.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

○ rpc-gssd.service - RPC security service for NFS client and server
     Loaded: loaded (/usr/lib/systemd/system/rpc-gssd.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 09:00:47 UTC; 1h 24min ago
       Docs: man:rpc.gssd(8)

Feb 02 09:00:47 np0005604790.novalocal systemd[1]: RPC security service for NFS client and server was skipped because of an unmet condition check (ConditionPathExists=/etc/krb5.keytab).

● rpc-statd-notify.service - Notify NFS peers of a restart
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd-notify.service; static)
     Active: active (exited) since Mon 2026-02-02 09:00:50 UTC; 1h 24min ago
       Docs: man:sm-notify(8)
             man:rpc.statd(8)
        CPU: 7ms

Feb 02 09:00:50 np0005604790.novalocal systemd[1]: Starting Notify NFS peers of a restart...
Feb 02 09:00:50 np0005604790.novalocal sm-notify[1004]: Version 2.5.4 starting
Feb 02 09:00:50 np0005604790.novalocal systemd[1]: Started Notify NFS peers of a restart.

○ rpc-statd.service - NFS status monitor for NFSv2/3 locking.
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.statd(8)

● rpcbind.service - RPC Bind
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
TriggeredBy: ● rpcbind.socket
       Docs: man:rpcbind(8)
   Main PID: 699 (rpcbind)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 2.6M (peak: 3.1M)
        CPU: 52ms
     CGroup: /system.slice/rpcbind.service
             └─699 /usr/bin/rpcbind -w -f

Feb 02 09:00:45 localhost systemd[1]: Starting RPC Bind...
Feb 02 09:00:45 localhost systemd[1]: Started RPC Bind.

● rsyslog.service - System Logging Service
     Loaded: loaded (/usr/lib/systemd/system/rsyslog.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 09:00:50 UTC; 1h 24min ago
       Docs: man:rsyslogd(8)
             https://www.rsyslog.com/doc/
   Main PID: 1005 (rsyslogd)
         IO: 4.0K read, 15.6M written
      Tasks: 3 (limit: 48560)
     Memory: 19.9M (peak: 20.6M)
        CPU: 12.898s
     CGroup: /system.slice/rsyslog.service
             └─1005 /usr/sbin/rsyslogd -n

Feb 02 09:57:12 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Feb 02 09:57:56 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Feb 02 09:58:02 compute-0 rsyslogd[1005]: imjournal from <np0005604790:nova_compute>: begin to drop messages due to rate-limiting
Feb 02 10:01:50 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Feb 02 10:01:50 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Feb 02 10:03:30 compute-0 rsyslogd[1005]: imjournal: 7085 messages lost due to rate-limiting (20000 allowed within 600 seconds)
Feb 02 10:13:05 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Feb 02 10:13:05 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Feb 02 10:16:10 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Feb 02 10:24:42 compute-0 rsyslogd[1005]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]

○ selinux-autorelabel-mark.service - Mark the need to relabel after reboot
     Loaded: loaded (/usr/lib/systemd/system/selinux-autorelabel-mark.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
Unit sntp.service could not be found.
Unit sshd-keygen.service could not be found.

Feb 02 09:00:45 localhost systemd[1]: Mark the need to relabel after reboot was skipped because of an unmet condition check (ConditionSecurity=!selinux).

● serial-getty@ttyS0.service - Serial Getty on ttyS0
     Loaded: loaded (/usr/lib/systemd/system/serial-getty@.service; enabled-runtime; preset: disabled)
     Active: active (running) since Mon 2026-02-02 09:00:50 UTC; 1h 24min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1011 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 296.0K (peak: 552.0K)
        CPU: 8ms
     CGroup: /system.slice/system-serial\x2dgetty.slice/serial-getty@ttyS0.service
             └─1011 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

Feb 02 09:00:50 np0005604790.novalocal systemd[1]: Started Serial Getty on ttyS0.

○ sshd-keygen@ecdsa.service - OpenSSH ecdsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 09:53:02 UTC; 32min ago

Feb 02 09:00:45 localhost systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Feb 02 09:53:02 compute-0 systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@ed25519.service - OpenSSH ed25519 Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 09:53:02 UTC; 32min ago

Feb 02 09:00:45 localhost systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Feb 02 09:53:02 compute-0 systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@rsa.service - OpenSSH rsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 09:53:02 UTC; 32min ago

Feb 02 09:00:45 localhost systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Feb 02 09:53:02 compute-0 systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

● sshd.service - OpenSSH server daemon
     Loaded: loaded (/usr/lib/systemd/system/sshd.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 09:53:02 UTC; 32min ago
       Docs: man:sshd(8)
             man:sshd_config(5)
   Main PID: 192944 (sshd)
         IO: 532.0K read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 3.9M (peak: 6.9M)
        CPU: 479ms
     CGroup: /system.slice/sshd.service
             └─192944 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"

Feb 02 10:14:07 compute-0 sshd-session[273448]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Unit syslog.service could not be found.
Feb 02 10:15:46 compute-0 sshd-session[282307]: Accepted publickey for zuul from 192.168.122.10 port 51192 ssh2: ECDSA SHA256:RIWOugHsRom13QN8+H2eekzMj6VNcm6gUxie+zDStiQ
Feb 02 10:15:46 compute-0 sshd-session[282307]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Feb 02 10:15:46 compute-0 sshd-session[282307]: pam_unix(sshd:session): session closed for user zuul
Feb 02 10:15:47 compute-0 sshd-session[282355]: Accepted publickey for zuul from 192.168.122.10 port 51194 ssh2: ECDSA SHA256:RIWOugHsRom13QN8+H2eekzMj6VNcm6gUxie+zDStiQ
Feb 02 10:15:47 compute-0 sshd-session[282355]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Feb 02 10:15:47 compute-0 sshd-session[282355]: pam_unix(sshd:session): session closed for user zuul
Feb 02 10:23:04 compute-0 sshd-session[289293]: Connection closed by authenticating user root 194.163.174.210 port 53290 [preauth]
Feb 02 10:24:27 compute-0 sshd-session[291112]: Accepted publickey for zuul from 192.168.122.10 port 46284 ssh2: ECDSA SHA256:RIWOugHsRom13QN8+H2eekzMj6VNcm6gUxie+zDStiQ
Feb 02 10:24:27 compute-0 sshd-session[291112]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)

○ sssd-kcm.service - SSSD Kerberos Cache Manager
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.service; indirect; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ● sssd-kcm.socket
       Docs: man:sssd-kcm(5)

○ sssd.service - System Security Services Daemon
     Loaded: loaded (/usr/lib/systemd/system/sssd.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 09:00:45 UTC; 1h 24min ago

Feb 02 09:00:45 localhost systemd[1]: System Security Services Daemon was skipped because no trigger condition checks were met.

○ sysstat-collect.service - system activity accounting tool
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-collect.timer
       Docs: man:sa1(8)

○ sysstat-summary.service - Generate a daily summary of process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-summary.timer
       Docs: man:sa2(8)

○ sysstat.service - Resets System Activity Logs
     Loaded: loaded (/usr/lib/systemd/system/sysstat.service; enabled; preset: enabled)
     Active: inactive (dead)

○ systemd-ask-password-console.service - Dispatch Password Requests to Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-console.path
       Docs: man:systemd-ask-password-console.service(8)

○ systemd-ask-password-wall.service - Forward Password Requests to Wall
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-wall.path
       Docs: man:systemd-ask-password-wall.service(8)

○ systemd-binfmt.service - Set Up Additional Binary Formats
     Loaded: loaded (/usr/lib/systemd/system/systemd-binfmt.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd-binfmt.service(8)
             man:binfmt.d(5)
             https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Feb 02 09:00:45 localhost systemd[1]: Set Up Additional Binary Formats was skipped because no trigger condition checks were met.

○ systemd-boot-random-seed.service - Update Boot Loader Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-random-seed.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd-boot-random-seed.service(8)
             man:random(4)

Feb 02 09:00:45 localhost systemd[1]: Update Boot Loader Random Seed was skipped because no trigger condition checks were met.

● systemd-boot-update.service - Automatic Boot Loader Update
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-update.service; enabled; preset: enabled)
     Active: active (exited) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:bootctl(1)
   Main PID: 695 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Feb 02 09:00:45 localhost systemd[1]: Starting Automatic Boot Loader Update...
Feb 02 09:00:45 localhost bootctl[695]: Couldn't find EFI system partition, skipping.
Feb 02 09:00:45 localhost systemd[1]: Finished Automatic Boot Loader Update.

○ systemd-firstboot.service - First Boot Wizard
     Loaded: loaded (/usr/lib/systemd/system/systemd-firstboot.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd-firstboot(1)

Feb 02 09:00:45 localhost systemd[1]: First Boot Wizard was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ systemd-fsck-root.service - File System Check on Root Device
     Loaded: loaded (/usr/lib/systemd/system/systemd-fsck-root.service; static)
     Active: inactive (dead) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
   Duration: 1.935s
       Docs: man:systemd-fsck-root.service(8)
   Main PID: 551 (code=exited, status=0/SUCCESS)
        CPU: 15ms

Feb 02 09:00:43 localhost systemd[1]: Starting File System Check on /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8...
Feb 02 09:00:43 localhost systemd-fsck[553]: /usr/sbin/fsck.xfs: XFS file system.
Feb 02 09:00:43 localhost systemd[1]: Finished File System Check on /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8.

● systemd-hostnamed.service - Hostname Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-hostnamed.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-hostnamed.service.d
             └─disable-privatedevices.conf
     Active: active (running) since Mon 2026-02-02 10:24:47 UTC; 20s ago
       Docs: man:systemd-hostnamed.service(8)
             man:hostname(5)
             man:machine-info(5)
             man:org.freedesktop.resolve1(5)
   Main PID: 293546 (systemd-hostnam)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 2.7M (peak: 3.6M)
        CPU: 101ms
     CGroup: /system.slice/systemd-hostnamed.service
             └─293546 /usr/lib/systemd/systemd-hostnamed

Feb 02 10:24:47 compute-0 systemd[1]: Starting Hostname Service...
Feb 02 10:24:47 compute-0 systemd[1]: Started Hostname Service.

● systemd-hwdb-update.service - Rebuild Hardware Database
     Loaded: loaded (/usr/lib/systemd/system/systemd-hwdb-update.service; static)
     Active: active (exited) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:hwdb(7)
             man:systemd-hwdb(8)
   Main PID: 686 (code=exited, status=0/SUCCESS)
        CPU: 471ms

Feb 02 09:00:45 localhost systemd[1]: Starting Rebuild Hardware Database...
Feb 02 09:00:45 localhost systemd[1]: Finished Rebuild Hardware Database.

○ systemd-initctl.service - initctl Compatibility Daemon
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-initctl.socket
       Docs: man:systemd-initctl.service(8)

● systemd-journal-catalog-update.service - Rebuild Journal Catalog
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-catalog-update.service; static)
     Active: active (exited) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 700 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Feb 02 09:00:45 localhost systemd[1]: Starting Rebuild Journal Catalog...
Feb 02 09:00:45 localhost systemd[1]: Finished Rebuild Journal Catalog.

● systemd-journal-flush.service - Flush Journal to Persistent Storage
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-flush.service; static)
     Active: active (exited) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 687 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Feb 02 09:00:45 localhost systemd[1]: Starting Flush Journal to Persistent Storage...
Feb 02 09:00:45 localhost systemd[1]: Finished Flush Journal to Persistent Storage.

● systemd-journald.service - Journal Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.service; static)
     Active: active (running) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
TriggeredBy: ● systemd-journald-dev-log.socket
             ● systemd-journald.socket
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 677 (systemd-journal)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 80.9M (peak: 88.1M)
        CPU: 15.071s
     CGroup: /system.slice/systemd-journald.service
             └─677 /usr/lib/systemd/systemd-journald

Feb 02 09:00:45 localhost systemd-journald[677]: Journal started
Feb 02 09:00:45 localhost systemd-journald[677]: Runtime Journal (/run/log/journal/bf0bc0bb03de29b24cba1cc9599cf5d0) is 8.0M, max 153.6M, 145.6M free.
Feb 02 09:00:44 localhost systemd[1]: systemd-journald.service: Deactivated successfully.
Feb 02 09:00:45 localhost systemd-journald[677]: Runtime Journal (/run/log/journal/bf0bc0bb03de29b24cba1cc9599cf5d0) is 8.0M, max 153.6M, 145.6M free.
Feb 02 09:00:45 localhost systemd-journald[677]: Received client request to flush runtime journal.

● systemd-logind.service - User Login Management
     Loaded: loaded (/usr/lib/systemd/system/systemd-logind.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-logind.service.d
             └─10-grub2-logind-service.conf
     Active: active (running) since Mon 2026-02-02 09:00:46 UTC; 1h 24min ago
       Docs: man:sd-login(3)
             man:systemd-logind.service(8)
             man:logind.conf(5)
             man:org.freedesktop.login1(5)
   Main PID: 793 (systemd-logind)
     Status: "Processing requests..."
         IO: 4.0K read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 5.6M (peak: 8.0M)
        CPU: 3.936s
     CGroup: /system.slice/systemd-logind.service
             └─793 /usr/lib/systemd/systemd-logind

Feb 02 10:14:07 compute-0 systemd-logind[793]: New session 56 of user zuul.
Feb 02 10:15:46 compute-0 systemd-logind[793]: Session 56 logged out. Waiting for processes to exit.
Feb 02 10:15:46 compute-0 systemd-logind[793]: Removed session 56.
Feb 02 10:15:46 compute-0 systemd-logind[793]: New session 57 of user zuul.
Feb 02 10:15:46 compute-0 systemd-logind[793]: Session 57 logged out. Waiting for processes to exit.
Feb 02 10:15:46 compute-0 systemd-logind[793]: Removed session 57.
Feb 02 10:15:47 compute-0 systemd-logind[793]: New session 58 of user zuul.
Feb 02 10:15:47 compute-0 systemd-logind[793]: Session 58 logged out. Waiting for processes to exit.
Feb 02 10:15:47 compute-0 systemd-logind[793]: Removed session 58.
Feb 02 10:24:27 compute-0 systemd-logind[793]: New session 59 of user zuul.

○ systemd-machine-id-commit.service - Commit a transient machine-id on disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-machine-id-commit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd-machine-id-commit.service(8)

Feb 02 09:00:45 localhost systemd[1]: Commit a transient machine-id on disk was skipped because of an unmet condition check (ConditionPathIsMountPoint=/etc/machine-id).

● systemd-machined.service - Virtual Machine and Container Registration Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-machined.service; static)
     Active: active (running) since Mon 2026-02-02 09:54:31 UTC; 30min ago
       Docs: man:systemd-machined.service(8)
             man:org.freedesktop.machine1(5)
   Main PID: 219024 (systemd-machine)
     Status: "Processing requests..."
Unit systemd-networkd-wait-online.service could not be found.
          IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 1.5M (peak: 2.0M)
        CPU: 1.425s
     CGroup: /system.slice/systemd-machined.service
             └─219024 /usr/lib/systemd/systemd-machined

Feb 02 10:02:38 compute-0 systemd-machined[219024]: New machine qemu-1-instance-00000001.
Feb 02 10:03:51 compute-0 systemd-machined[219024]: Machine qemu-1-instance-00000001 terminated.
Feb 02 10:04:20 compute-0 systemd-machined[219024]: New machine qemu-2-instance-00000003.
Feb 02 10:05:00 compute-0 systemd-machined[219024]: Machine qemu-2-instance-00000003 terminated.
Feb 02 10:06:14 compute-0 systemd-machined[219024]: New machine qemu-3-instance-00000005.
Feb 02 10:06:32 compute-0 systemd-machined[219024]: Machine qemu-3-instance-00000005 terminated.
Feb 02 10:08:00 compute-0 systemd-machined[219024]: New machine qemu-4-instance-00000007.
Feb 02 10:08:51 compute-0 systemd-machined[219024]: Machine qemu-4-instance-00000007 terminated.
Feb 02 10:12:42 compute-0 systemd-machined[219024]: New machine qemu-5-instance-0000000d.
Feb 02 10:13:05 compute-0 systemd-machined[219024]: Machine qemu-5-instance-0000000d terminated.

● systemd-modules-load.service - Load Kernel Modules
     Loaded: loaded (/usr/lib/systemd/system/systemd-modules-load.service; static)
     Active: active (exited) since Mon 2026-02-02 09:56:10 UTC; 28min ago
       Docs: man:systemd-modules-load.service(8)
             man:modules-load.d(5)
   Main PID: 234975 (code=exited, status=0/SUCCESS)
        CPU: 16ms

Feb 02 09:56:10 compute-0 systemd[1]: Starting Load Kernel Modules...
Feb 02 09:56:10 compute-0 systemd[1]: Finished Load Kernel Modules.

● systemd-network-generator.service - Generate network units from Kernel command line
     Loaded: loaded (/usr/lib/systemd/system/systemd-network-generator.service; enabled; preset: enabled)
     Active: active (exited) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd-network-generator.service(8)
   Main PID: 678 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Feb 02 09:00:45 localhost systemd[1]: Finished Generate network units from Kernel command line.

○ systemd-pcrmachine.service - TPM2 PCR Machine ID Measurement
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrmachine.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd-pcrmachine.service(8)

○ systemd-pcrphase-initrd.service - TPM2 PCR Barrier (initrd)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-initrd.service; static)
     Active: inactive (dead)
       Docs: man:systemd-pcrphase-initrd.service(8)

○ systemd-pcrphase-sysinit.service - TPM2 PCR Barrier (Initialization)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-sysinit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd-pcrphase-sysinit.service(8)

Feb 02 09:00:45 localhost systemd[1]: TPM2 PCR Barrier (Initialization) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pcrphase.service - TPM2 PCR Barrier (User)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 09:00:47 UTC; 1h 24min ago
       Docs: man:systemd-pcrphase.service(8)

Feb 02 09:00:47 np0005604790.novalocal systemd[1]: TPM2 PCR Barrier (User) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pstore.service - Platform Persistent Storage Archival
     Loaded: loaded (/usr/lib/systemd/system/systemd-pstore.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd-pstore(8)

Unit systemd-timesyncd.service could not be found.
Feb 02 09:00:45 localhost systemd[1]: Platform Persistent Storage Archival was skipped because of an unmet condition check (ConditionDirectoryNotEmpty=/sys/fs/pstore).

● systemd-random-seed.service - Load/Save OS Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-random-seed.service; static)
     Active: active (exited) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd-random-seed.service(8)
             man:random(4)
   Main PID: 688 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Feb 02 09:00:45 localhost systemd[1]: Starting Load/Save OS Random Seed...
Feb 02 09:00:45 localhost systemd[1]: Finished Load/Save OS Random Seed.

● systemd-remount-fs.service - Remount Root and Kernel File Systems
     Loaded: loaded (/usr/lib/systemd/system/systemd-remount-fs.service; enabled-runtime; preset: disabled)
     Active: active (exited) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd-remount-fs.service(8)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
   Main PID: 679 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Feb 02 09:00:45 localhost systemd[1]: Finished Remount Root and Kernel File Systems.

○ systemd-repart.service - Repartition Root Disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-repart.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd-repart.service(8)

○ systemd-rfkill.service - Load/Save RF Kill Switch Status
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-rfkill.socket
       Docs: man:systemd-rfkill.service(8)

● systemd-sysctl.service - Apply Kernel Variables
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysctl.service; static)
     Active: active (exited) since Mon 2026-02-02 09:33:01 UTC; 52min ago
       Docs: man:systemd-sysctl.service(8)
             man:sysctl.d(5)
   Main PID: 45003 (code=exited, status=0/SUCCESS)
        CPU: 25ms

Feb 02 09:33:01 compute-0 systemd[1]: Starting Apply Kernel Variables...
Feb 02 09:33:01 compute-0 systemd[1]: Finished Apply Kernel Variables.

○ systemd-sysext.service - Merge System Extension Images into /usr/ and /opt/
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysext.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd-sysext.service(8)

● systemd-sysusers.service - Create System Users
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysusers.service; static)
     Active: active (exited) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:sysusers.d(5)
             man:systemd-sysusers.service(8)
   Main PID: 689 (code=exited, status=0/SUCCESS)
        CPU: 18ms

Feb 02 09:00:45 localhost systemd[1]: Starting Create System Users...
Feb 02 09:00:45 localhost systemd[1]: Finished Create System Users.

○ systemd-tmpfiles-clean.service - Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.service; static)
     Active: inactive (dead) since Mon 2026-02-02 09:15:51 UTC; 1h 9min ago
TriggeredBy: ● systemd-tmpfiles-clean.timer
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 29987 (code=exited, status=0/SUCCESS)
        CPU: 59ms

Feb 02 09:15:51 compute-0 systemd[1]: Starting Cleanup of Temporary Directories...
Feb 02 09:15:51 compute-0 systemd[1]: systemd-tmpfiles-clean.service: Deactivated successfully.
Feb 02 09:15:51 compute-0 systemd[1]: Finished Cleanup of Temporary Directories.

● systemd-tmpfiles-setup-dev.service - Create Static Device Nodes in /dev
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup-dev.service; static)
     Active: active (exited) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 691 (code=exited, status=0/SUCCESS)
        CPU: 37ms

Feb 02 09:00:45 localhost systemd[1]: Starting Create Static Device Nodes in /dev...
Unit systemd-tmpfiles.service could not be found.
Feb 02 09:00:45 localhost systemd[1]: Finished Create Static Device Nodes in /dev.

● systemd-tmpfiles-setup.service - Create Volatile Files and Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup.service; static)
     Active: active (exited) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 696 (code=exited, status=0/SUCCESS)
        CPU: 96ms

Feb 02 09:00:45 localhost systemd[1]: Starting Create Volatile Files and Directories...
Feb 02 09:00:45 localhost systemd[1]: Finished Create Volatile Files and Directories.

● systemd-udev-settle.service - Wait for udev To Complete Device Initialization
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-settle.service; static)
     Active: active (exited) since Mon 2026-02-02 09:56:05 UTC; 29min ago
       Docs: man:systemd-udev-settle.service(8)
   Main PID: 233929 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Feb 02 09:56:05 compute-0 systemd[1]: Starting Wait for udev To Complete Device Initialization...
Feb 02 09:56:05 compute-0 udevadm[233929]: systemd-udev-settle.service is deprecated. Please fix multipathd.service not to pull it in.
Feb 02 09:56:05 compute-0 systemd[1]: Finished Wait for udev To Complete Device Initialization.

● systemd-udev-trigger.service - Coldplug All udev Devices
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-trigger.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-udev-trigger.service.d
             └─systemd-udev-trigger-no-reload.conf
     Active: active (exited) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:udev(7)
             man:systemd-udevd.service(8)
   Main PID: 683 (code=exited, status=0/SUCCESS)
        CPU: 82ms

Feb 02 09:00:45 localhost systemd[1]: Finished Coldplug All udev Devices.

● systemd-udevd.service - Rule-based Manager for Device Events and Files
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd.service; static)
     Active: active (running) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
TriggeredBy: ● systemd-udevd-control.socket
             ● systemd-udevd-kernel.socket
       Docs: man:systemd-udevd.service(8)
             man:udev(7)
   Main PID: 729 (systemd-udevd)
     Status: "Processing with 32 children at max"
         IO: 72.2M read, 1.1M written
      Tasks: 1
     Memory: 29.8M (peak: 90.2M)
        CPU: 8.534s
     CGroup: /system.slice/systemd-udevd.service
             └─udev
               └─729 /usr/lib/systemd/systemd-udevd

Feb 02 10:20:54 compute-0 lvm[287991]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Feb 02 10:20:54 compute-0 lvm[287991]: VG ceph_vg0 finished
Feb 02 10:22:00 compute-0 lvm[288976]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Feb 02 10:22:00 compute-0 lvm[288976]: VG ceph_vg0 finished
Feb 02 10:23:07 compute-0 lvm[289982]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Feb 02 10:23:07 compute-0 lvm[289982]: VG ceph_vg0 finished
Feb 02 10:24:13 compute-0 lvm[290961]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Feb 02 10:24:13 compute-0 lvm[290961]: VG ceph_vg0 finished
Feb 02 10:24:35 compute-0 lvm[291802]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Feb 02 10:24:35 compute-0 lvm[291802]: VG ceph_vg0 finished

● systemd-update-done.service - Update is Completed
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-done.service; static)
     Active: active (exited) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd-update-done.service(8)
   Main PID: 762 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Feb 02 09:00:45 localhost systemd[1]: Starting Update is Completed...
Feb 02 09:00:45 localhost systemd[1]: Finished Update is Completed.

○ systemd-update-utmp-runlevel.service - Record Runlevel Change in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp-runlevel.service; static)
     Active: inactive (dead) since Mon 2026-02-02 09:00:50 UTC; 1h 24min ago
       Docs: man:systemd-update-utmp-runlevel.service(8)
             man:utmp(5)
Unit tlp.service could not be found.
   Main PID: 1016 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Feb 02 09:00:50 np0005604790.novalocal systemd[1]: Starting Record Runlevel Change in UTMP...
Feb 02 09:00:50 np0005604790.novalocal systemd[1]: systemd-update-utmp-runlevel.service: Deactivated successfully.
Feb 02 09:00:50 np0005604790.novalocal systemd[1]: Finished Record Runlevel Change in UTMP.

● systemd-update-utmp.service - Record System Boot/Shutdown in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp.service; static)
     Active: active (exited) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd-update-utmp.service(8)
             man:utmp(5)
   Main PID: 728 (code=exited, status=0/SUCCESS)
        CPU: 11ms

Feb 02 09:00:45 localhost systemd[1]: Starting Record System Boot/Shutdown in UTMP...
Feb 02 09:00:45 localhost systemd[1]: Finished Record System Boot/Shutdown in UTMP.

● systemd-user-sessions.service - Permit User Sessions
     Loaded: loaded (/usr/lib/systemd/system/systemd-user-sessions.service; static)
     Active: active (exited) since Mon 2026-02-02 09:00:50 UTC; 1h 24min ago
       Docs: man:systemd-user-sessions.service(8)
   Main PID: 1007 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Feb 02 09:00:50 np0005604790.novalocal systemd[1]: Starting Permit User Sessions...
Feb 02 09:00:50 np0005604790.novalocal systemd[1]: Finished Permit User Sessions.

○ systemd-vconsole-setup.service - Setup Virtual Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-vconsole-setup.service; static)
     Active: inactive (dead) since Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
   Duration: 1.871s
       Docs: man:systemd-vconsole-setup.service(8)
             man:vconsole.conf(5)
   Main PID: 310 (code=exited, status=0/SUCCESS)
        CPU: 208ms

Feb 02 09:00:42 localhost systemd[1]: Finished Setup Virtual Console.
Feb 02 09:00:44 localhost systemd[1]: systemd-vconsole-setup.service: Deactivated successfully.
Feb 02 09:00:44 localhost systemd[1]: Stopped Setup Virtual Console.
Notice: journal has been rotated since unit was started, output may be incomplete.

● tuned.service - Dynamic System Tuning Daemon
     Loaded: loaded (/usr/lib/systemd/system/tuned.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 09:44:17 UTC; 40min ago
       Docs: man:tuned(8)
             man:tuned.conf(5)
             man:tuned-adm(8)
   Main PID: 113383 (tuned)
         IO: 0B read, 0B written
      Tasks: 4 (limit: 48560)
     Memory: 13.8M (peak: 16.9M)
        CPU: 1.153s
     CGroup: /system.slice/tuned.service
             └─113383 /usr/bin/python3 -Es /usr/sbin/tuned -l -P

Feb 02 09:44:17 compute-0 systemd[1]: Starting Dynamic System Tuning Daemon...
Feb 02 09:44:17 compute-0 systemd[1]: Started Dynamic System Tuning Daemon.

○ unbound-anchor.service - update of the root trust anchor for DNSSEC validation in unbound
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.service; static)
     Active: inactive (dead)
TriggeredBy: ● unbound-anchor.timer
       Docs: man:unbound-anchor(8)

● user-runtime-dir@1000.service - User Runtime Directory /run/user/1000
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Mon 2026-02-02 09:01:14 UTC; 1h 23min ago
       Docs: man:user@.service(5)
   Main PID: 4319 (code=exited, status=0/SUCCESS)
        CPU: 18ms

Feb 02 09:01:14 np0005604790.novalocal systemd[1]: Starting User Runtime Directory /run/user/1000...
Feb 02 09:01:14 np0005604790.novalocal systemd[1]: Finished User Runtime Directory /run/user/1000.

● user-runtime-dir@42477.service - User Runtime Directory /run/user/42477
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Mon 2026-02-02 09:40:48 UTC; 44min ago
       Docs: man:user@.service(5)
   Main PID: 93257 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Feb 02 09:40:48 compute-0 systemd[1]: Starting User Runtime Directory /run/user/42477...
Feb 02 09:40:48 compute-0 systemd[1]: Finished User Runtime Directory /run/user/42477.

● user@1000.service - User Manager for UID 1000
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Mon 2026-02-02 09:01:14 UTC; 1h 23min ago
       Docs: man:user@.service(5)
   Main PID: 4320 (systemd)
     Status: "Ready."
         IO: 676.0K read, 4.0K written
      Tasks: 5
     Memory: 9.2M (peak: 15.1M)
        CPU: 4.370s
     CGroup: /user.slice/user-1000.slice/user@1000.service
             ├─app.slice
             │ └─dbus-broker.service
             │   ├─13129 /usr/bin/dbus-broker-launch --scope user
             │   └─13141 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             ├─init.scope
             │ ├─4320 /usr/lib/systemd/systemd --user
             │ └─4322 "(sd-pam)"
             └─user.slice
               └─podman-pause-663ee478.scope
                 └─13025 catatonit -P

Feb 02 09:14:17 np0005604790.novalocal dbus-broker-launch[13129]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +31: Eavesdropping is deprecated and ignored
Feb 02 09:14:17 np0005604790.novalocal dbus-broker-launch[13129]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +33: Eavesdropping is deprecated and ignored
Feb 02 09:14:17 np0005604790.novalocal systemd[4320]: Started D-Bus User Message Bus.
Feb 02 09:14:17 np0005604790.novalocal dbus-broker-lau[13129]: Ready
Feb 02 09:14:17 np0005604790.novalocal systemd[4320]: selinux: avc:  op=load_policy lsm=selinux seqno=4 res=1
Feb 02 09:14:17 np0005604790.novalocal systemd[4320]: Created slice Slice /user.
Feb 02 09:14:17 np0005604790.novalocal systemd[4320]: podman-13007.scope: unit configures an IP firewall, but not running as root.
Feb 02 09:14:17 np0005604790.novalocal systemd[4320]: (This warning is only shown for the first unit using IP firewalling.)
Feb 02 09:14:17 np0005604790.novalocal systemd[4320]: Started podman-13007.scope.
Feb 02 09:14:17 np0005604790.novalocal systemd[4320]: Started podman-pause-663ee478.scope.

● user@42477.service - User Manager for UID 42477
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Mon 2026-02-02 09:40:48 UTC; 44min ago
       Docs: man:user@.service(5)
   Main PID: 93258 (systemd)
     Status: "Ready."
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 6.0M (peak: 10.8M)
        CPU: 2.678s
     CGroup: /user.slice/user-42477.slice/user@42477.service
             └─init.scope
               ├─93258 /usr/lib/systemd/systemd --user
               └─93260 "(sd-pam)"

Feb 02 09:40:48 compute-0 systemd[93258]: Finished Create User's Volatile Files and Directories.
Feb 02 09:40:48 compute-0 systemd[93258]: Reached target Basic System.
Feb 02 09:40:48 compute-0 systemd[93258]: Reached target Main User Target.
Feb 02 09:40:48 compute-0 systemd[93258]: Startup finished in 163ms.
Feb 02 09:40:48 compute-0 systemd[1]: Started User Manager for UID 42477.
Feb 02 09:42:48 compute-0 systemd[93258]: Starting Mark boot as successful...
Feb 02 09:42:48 compute-0 systemd[93258]: Finished Mark boot as successful.
Feb 02 09:46:05 compute-0 systemd[93258]: Created slice User Background Tasks Slice.
Feb 02 09:46:05 compute-0 systemd[93258]: Starting Cleanup of User's Temporary Files and Directories...
Feb 02 09:46:05 compute-0 systemd[93258]: Finished Cleanup of User's Temporary Files and Directories.

○ virtinterfaced.service - libvirt interface daemon
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtinterfaced-ro.socket
             ○ virtinterfaced.socket
             ○ virtinterfaced-admin.socket
       Docs: man:virtinterfaced(8)
             https://libvirt.org/

○ virtlockd.service - libvirt locking daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtlockd-admin.socket
             ● virtlockd.socket
       Docs: man:virtlockd(8)
             https://libvirt.org/

● virtlogd.service - libvirt logging daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 09:54:28 UTC; 30min ago
TriggeredBy: ● virtlogd.socket
             ● virtlogd-admin.socket
       Docs: man:virtlogd(8)
             https://libvirt.org/
   Main PID: 218379 (virtlogd)
         IO: 648.0K read, 500.0K written
      Tasks: 1 (limit: 48560)
     Memory: 3.3M (peak: 3.6M)
        CPU: 7.034s
     CGroup: /system.slice/virtlogd.service
             └─218379 /usr/sbin/virtlogd

Feb 02 09:54:28 compute-0 systemd[1]: Starting libvirt logging daemon...
Feb 02 09:54:28 compute-0 systemd[1]: Started libvirt logging daemon.

○ virtnetworkd.service - libvirt network daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnetworkd-ro.socket
             ○ virtnetworkd.socket
             ○ virtnetworkd-admin.socket
       Docs: man:virtnetworkd(8)
             https://libvirt.org/

● virtnodedevd.service - libvirt nodedev daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 09:58:03 UTC; 27min ago
TriggeredBy: ● virtnodedevd-ro.socket
             ● virtnodedevd-admin.socket
             ● virtnodedevd.socket
       Docs: man:virtnodedevd(8)
             https://libvirt.org/
   Main PID: 252999 (virtnodedevd)
         IO: 2.6M read, 0B written
      Tasks: 20 (limit: 48560)
     Memory: 8.4M (peak: 9.7M)
        CPU: 1.959s
     CGroup: /system.slice/virtnodedevd.service
             └─252999 /usr/sbin/virtnodedevd --timeout 120

Feb 02 09:58:02 compute-0 systemd[1]: Starting libvirt nodedev daemon...
Feb 02 09:58:03 compute-0 systemd[1]: Started libvirt nodedev daemon.

○ virtnwfilterd.service - libvirt nwfilter daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnwfilterd-ro.socket
             ○ virtnwfilterd.socket
             ○ virtnwfilterd-admin.socket
       Docs: man:virtnwfilterd(8)
             https://libvirt.org/

○ virtproxyd.service - libvirt proxy daemon
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.service; enabled; preset: disabled)
     Active: inactive (dead) since Mon 2026-02-02 09:56:30 UTC; 28min ago
   Duration: 2min 7ms
TriggeredBy: ● virtproxyd-tls.socket
             ● virtproxyd.socket
             ● virtproxyd-admin.socket
             ● virtproxyd-ro.socket
       Docs: man:virtproxyd(8)
             https://libvirt.org/
   Main PID: 218806 (code=exited, status=0/SUCCESS)
        CPU: 45ms

Feb 02 09:54:30 compute-0 systemd[1]: Starting libvirt proxy daemon...
Feb 02 09:54:30 compute-0 systemd[1]: Started libvirt proxy daemon.
Feb 02 09:56:30 compute-0 systemd[1]: virtproxyd.service: Deactivated successfully.

● virtqemud.service - libvirt QEMU daemon
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 09:57:55 UTC; 27min ago
TriggeredBy: ● virtqemud.socket
             ● virtqemud-admin.socket
             ● virtqemud-ro.socket
       Docs: man:virtqemud(8)
             https://libvirt.org/
   Main PID: 252362 (virtqemud)
         IO: 13.0M read, 338.5K written
      Tasks: 19 (limit: 32768)
     Memory: 30.8M (peak: 57.1M)
        CPU: 3.398s
     CGroup: /system.slice/virtqemud.service
             └─252362 /usr/sbin/virtqemud --timeout 120

Unit ypbind.service could not be found.
Unit yppasswdd.service could not be found.
Unit ypserv.service could not be found.
Unit ypxfrd.service could not be found.
Feb 02 09:57:57 compute-0 virtqemud[252362]: libvirt version: 11.10.0, package: 2.el9 (builder@centos.org, 2025-12-18-15:09:54, )
Feb 02 09:57:57 compute-0 virtqemud[252362]: hostname: compute-0
Feb 02 09:57:57 compute-0 virtqemud[252362]: End of file while reading data: Input/output error
Feb 02 10:14:19 compute-0 virtqemud[252362]: Failed to connect socket to '/var/run/libvirt/virtnetworkd-sock-ro': No such file or directory
Feb 02 10:14:19 compute-0 virtqemud[252362]: Failed to connect socket to '/var/run/libvirt/virtnwfilterd-sock-ro': No such file or directory
Feb 02 10:14:19 compute-0 virtqemud[252362]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory
Feb 02 10:14:52 compute-0 virtqemud[252362]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory
Feb 02 10:24:34 compute-0 virtqemud[252362]: Failed to connect socket to '/var/run/libvirt/virtnetworkd-sock-ro': No such file or directory
Feb 02 10:24:35 compute-0 virtqemud[252362]: Failed to connect socket to '/var/run/libvirt/virtnwfilterd-sock-ro': No such file or directory
Feb 02 10:24:35 compute-0 virtqemud[252362]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory

○ virtsecretd.service - libvirt secret daemon
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.service; enabled; preset: disabled)
     Active: inactive (dead) since Mon 2026-02-02 10:14:42 UTC; 10min ago
   Duration: 2min 25ms
TriggeredBy: ● virtsecretd-ro.socket
             ● virtsecretd-admin.socket
             ● virtsecretd.socket
       Docs: man:virtsecretd(8)
             https://libvirt.org/
    Process: 271982 ExecStart=/usr/sbin/virtsecretd $VIRTSECRETD_ARGS (code=exited, status=0/SUCCESS)
   Main PID: 271982 (code=exited, status=0/SUCCESS)
        CPU: 49ms

Feb 02 10:12:42 compute-0 systemd[1]: Starting libvirt secret daemon...
Feb 02 10:12:42 compute-0 systemd[1]: Started libvirt secret daemon.
Feb 02 10:14:42 compute-0 systemd[1]: virtsecretd.service: Deactivated successfully.

○ virtstoraged.service - libvirt storage daemon
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtstoraged.socket
             ○ virtstoraged-admin.socket
             ○ virtstoraged-ro.socket
       Docs: man:virtstoraged(8)
             https://libvirt.org/

● -.slice - Root Slice
     Loaded: loaded
     Active: active since Mon 2026-02-02 09:00:41 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:41 UTC; 1h 24min ago
       Docs: man:systemd.special(7)
      Tasks: 1391
     Memory: 3.2G
        CPU: 44min 11.550s
     CGroup: /
             ├─297147 turbostat --debug sleep 10
             ├─297154 sleep 10
             ├─init.scope
             │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
             ├─machine.slice
             │ ├─libpod-29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8.scope
             │ │ └─container
             │ │   ├─165361 dumb-init --single-child -- kolla_start
             │ │   ├─165364 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │ │   ├─165809 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │ │   ├─166028 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp_26sdv4_/privsep.sock
             │ │   ├─257524 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpq3jugzpc/privsep.sock
             │ │   └─257582 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpjv70h06s/privsep.sock
             │ ├─libpod-46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3.scope
             │ │ └─container
             │ │   ├─252674 dumb-init --single-child -- kolla_start
             │ │   ├─252676 /usr/bin/python3 /usr/bin/nova-compute
             │ │   ├─257381 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmpwhhqa15o/privsep.sock
             │ │   └─258300 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmp683yhkaj/privsep.sock
             │ └─libpod-e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36.scope
             │   └─container
             │     ├─154633 dumb-init --single-child -- kolla_start
             │     └─154636 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
             ├─system.slice
             │ ├─NetworkManager.service
             │ │ └─49024 /usr/sbin/NetworkManager --no-daemon
             │ ├─auditd.service
             │ │ ├─701 /sbin/auditd
             │ │ └─703 /usr/sbin/sedispatch
             │ ├─chronyd.service
             │ │ └─58590 /usr/sbin/chronyd -F 2
             │ ├─crond.service
             │ │ └─1009 /usr/sbin/crond -n
             │ ├─dbus-broker.service
             │ │ ├─772 /usr/bin/dbus-broker-launch --scope system --audit
             │ │ └─780 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             │ ├─edpm_nova_compute.service
             │ │ └─252672 /usr/bin/conmon --api-version 1 -c 46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3 -u 46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3/userdata -p /run/containers/storage/overlay-containers/46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3
             │ ├─edpm_ovn_controller.service
             │ │ └─154631 /usr/bin/conmon --api-version 1 -c e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36 -u e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36/userdata -p /run/containers/storage/overlay-containers/e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36
             │ ├─edpm_ovn_metadata_agent.service
             │ │ └─165359 /usr/bin/conmon --api-version 1 -c 29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8 -u 29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8/userdata -p /run/containers/storage/overlay-containers/29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8
             │ ├─gssproxy.service
             │ │ └─879 /usr/sbin/gssproxy -D
             │ ├─irqbalance.service
             │ │ └─788 /usr/sbin/irqbalance
             │ ├─iscsid.service
             │ │ └─236646 /usr/sbin/iscsid -f
             │ ├─multipathd.service
             │ │ └─236888 /sbin/multipathd -d -s
             │ ├─ovs-vswitchd.service
             │ │ └─47331 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             │ ├─ovsdb-server.service
             │ │ └─47250 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             │ ├─polkit.service
             │ │ └─43515 /usr/lib/polkit-1/polkitd --no-debug
             │ ├─rpcbind.service
             │ │ └─699 /usr/bin/rpcbind -w -f
             │ ├─rsyslog.service
             │ │ └─1005 /usr/sbin/rsyslogd -n
             │ ├─sshd.service
             │ │ └─192944 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             │ ├─system-ceph\x2dd241d473\x2d9fcb\x2d5f74\x2db163\x2df1ca4454e7f1.slice
             │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@alertmanager.compute-0.service
             │ │ │ ├─libpod-payload-63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4
             │ │ │ │ ├─104368 /run/podman-init -- /bin/alertmanager --cluster.listen-address=:9094 --web.listen-address=192.168.122.100:9093 --config.file=/etc/alertmanager/alertmanager.yml
             │ │ │ │ └─104370 /bin/alertmanager --cluster.listen-address=:9094 --web.listen-address=192.168.122.100:9093 --config.file=/etc/alertmanager/alertmanager.yml
             │ │ │ └─runtime
             │ │ │   └─104366 /usr/bin/conmon --api-version 1 -c 63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4 -u 63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4/userdata -p /run/containers/storage/overlay-containers/63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-alertmanager-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@alertmanager.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4
             │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@crash.compute-0.service
             │ │ │ ├─libpod-payload-318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2
             │ │ │ │ ├─79741 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ │ └─79743 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ └─runtime
             │ │ │   └─79739 /usr/bin/conmon --api-version 1 -c 318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2 -u 318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2/userdata -p /run/containers/storage/overlay-containers/318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2
             │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@grafana.compute-0.service
             │ │ │ ├─libpod-payload-207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2
             │ │ │ │ ├─104675 /run/podman-init -- /run.sh
             │ │ │ │ └─104677 grafana server --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini --packaging=docker cfg:default.log.mode=console cfg:default.paths.data=/var/lib/grafana cfg:default.paths.logs=/var/log/grafana cfg:default.paths.plugins=/var/lib/grafana/plugins cfg:default.paths.provisioning=/etc/grafana/provisioning
             │ │ │ └─runtime
             │ │ │   └─104673 /usr/bin/conmon --api-version 1 -c 207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2 -u 207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2/userdata -p /run/containers/storage/overlay-containers/207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-grafana-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@grafana.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2
             │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@haproxy.nfs.cephfs.compute-0.ooxkuo.service
             │ │ │ ├─libpod-payload-5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e
             │ │ │ │ ├─97798 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ │ │ │ ├─97800 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ │ │ └─97802 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ │ └─runtime
             │ │ │   └─97796 /usr/bin/conmon --api-version 1 -c 5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e -u 5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e/userdata -p /run/containers/storage/overlay-containers/5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-nfs-cephfs-compute-0-ooxkuo --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@haproxy.nfs.cephfs.compute-0.ooxkuo.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e
             │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@haproxy.rgw.default.compute-0.avekxu.service
             │ │ │ ├─libpod-payload-19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190
             │ │ │ │ ├─90405 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ │ │ │ ├─90407 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ │ │ └─90409 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ │ └─runtime
             │ │ │   └─90403 /usr/bin/conmon --api-version 1 -c 19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190 -u 19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190/userdata -p /run/containers/storage/overlay-containers/19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-rgw-default-compute-0-avekxu --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@haproxy.rgw.default.compute-0.avekxu.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190
             │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@keepalived.nfs.cephfs.compute-0.pqolko.service
             │ │ │ ├─libpod-payload-5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173
             │ │ │ │ ├─98165 /run/podman-init -- ./init.sh
             │ │ │ │ ├─98167 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ │ │ └─98169 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ │ └─runtime
             │ │ │   └─98163 /usr/bin/conmon --api-version 1 -c 5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173 -u 5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173/userdata -p /run/containers/storage/overlay-containers/5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-nfs-cephfs-compute-0-pqolko --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@keepalived.nfs.cephfs.compute-0.pqolko.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173
             │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@keepalived.rgw.default.compute-0.pxmjnp.service
             │ │ │ ├─libpod-payload-860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e
             │ │ │ │ ├─99765 /run/podman-init -- ./init.sh
             │ │ │ │ ├─99767 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ │ │ └─99769 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ │ └─runtime
             │ │ │   └─99763 /usr/bin/conmon --api-version 1 -c 860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e -u 860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e/userdata -p /run/containers/storage/overlay-containers/860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-rgw-default-compute-0-pxmjnp --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@keepalived.rgw.default.compute-0.pxmjnp.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e
             │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mds.cephfs.compute-0.clmmzw.service
             │ │ │ ├─libpod-payload-7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10
             │ │ │ │ ├─96759 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.clmmzw -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─96761 /usr/bin/ceph-mds -n mds.cephfs.compute-0.clmmzw -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─96757 /usr/bin/conmon --api-version 1 -c 7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10 -u 7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10/userdata -p /run/containers/storage/overlay-containers/7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-mds-cephfs-compute-0-clmmzw --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mds.cephfs.compute-0.clmmzw.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10
             │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mgr.compute-0.djvyfo.service
             │ │ │ ├─libpod-payload-3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc
             │ │ │ │ ├─74783 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.djvyfo -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─74785 /usr/bin/ceph-mgr -n mgr.compute-0.djvyfo -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─74781 /usr/bin/conmon --api-version 1 -c 3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc -u 3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc/userdata -p /run/containers/storage/overlay-containers/3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-mgr-compute-0-djvyfo --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mgr.compute-0.djvyfo.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc
             │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mon.compute-0.service
             │ │ │ ├─libpod-payload-79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783
             │ │ │ │ ├─74487 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ │ └─74489 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─74485 /usr/bin/conmon --api-version 1 -c 79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783 -u 79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783/userdata -p /run/containers/storage/overlay-containers/79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783
             │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@nfs.cephfs.2.0.compute-0.fdwwab.service
             │ │ │ ├─libpod-payload-47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9
             │ │ │ │ ├─270910 /run/podman-init -- /usr/bin/ganesha.nfsd -F -L STDERR -N NIV_EVENT
             │ │ │ │ └─270912 /usr/bin/ganesha.nfsd -F -L STDERR -N NIV_EVENT
             │ │ │ └─runtime
             │ │ │   └─270907 /usr/bin/conmon --api-version 1 -c 47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9 -u 47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9/userdata -p /run/containers/storage/overlay-containers/47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-nfs-cephfs-2-0-compute-0-fdwwab --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@nfs.cephfs.2.0.compute-0.fdwwab.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9
             │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@node-exporter.compute-0.service
             │ │ │ ├─libpod-payload-690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998
             │ │ │ │ ├─104074 /run/podman-init -- /bin/node_exporter --no-collector.timex --web.listen-address=:9100 --path.procfs=/host/proc --path.sysfs=/host/sys --path.rootfs=/rootfs
             │ │ │ │ └─104076 /bin/node_exporter --no-collector.timex --web.listen-address=:9100 --path.procfs=/host/proc --path.sysfs=/host/sys --path.rootfs=/rootfs
             │ │ │ └─runtime
             │ │ │   └─104072 /usr/bin/conmon --api-version 1 -c 690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998 -u 690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998/userdata -p /run/containers/storage/overlay-containers/690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-node-exporter-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@node-exporter.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998
             │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@osd.1.service
             │ │ │ ├─libpod-payload-4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763
             │ │ │ │ ├─82703 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─82705 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─82701 /usr/bin/conmon --api-version 1 -c 4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763 -u 4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763/userdata -p /run/containers/storage/overlay-containers/4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763
             │ │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@prometheus.compute-0.service
             │ │ │ ├─libpod-payload-214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667
             │ │ │ │ ├─100307 /run/podman-init -- /bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=15d --storage.tsdb.retention.size=0 --web.external-url=http://compute-0.ctlplane.example.com:9095 --web.listen-address=192.168.122.100:9095
             │ │ │ │ └─100309 /bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=15d --storage.tsdb.retention.size=0 --web.external-url=http://compute-0.ctlplane.example.com:9095 --web.listen-address=192.168.122.100:9095
             │ │ │ └─runtime
             │ │ │   └─100305 /usr/bin/conmon --api-version 1 -c 214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667 -u 214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667/userdata -p /run/containers/storage/overlay-containers/214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-prometheus-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@prometheus.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667
             │ │ └─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@rgw.rgw.compute-0.vltabo.service
             │ │   ├─libpod-payload-2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f
             │ │   │ ├─89250 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.vltabo -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │   │ └─89254 /usr/bin/radosgw -n client.rgw.rgw.compute-0.vltabo -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │   └─runtime
             │ │     └─89247 /usr/bin/conmon --api-version 1 -c 2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f -u 2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f/userdata -p /run/containers/storage/overlay-containers/2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-rgw-rgw-compute-0-vltabo --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@rgw.rgw.compute-0.vltabo.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f
             │ ├─system-getty.slice
             │ │ └─getty@tty1.service
             │ │   └─1010 /sbin/agetty -o "-p -- \\u" --noclear - linux
             │ ├─system-serial\x2dgetty.slice
             │ │ └─serial-getty@ttyS0.service
             │ │   └─1011 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             │ ├─systemd-hostnamed.service
             │ │ └─293546 /usr/lib/systemd/systemd-hostnamed
             │ ├─systemd-journald.service
             │ │ └─677 /usr/lib/systemd/systemd-journald
             │ ├─systemd-logind.service
             │ │ └─793 /usr/lib/systemd/systemd-logind
             │ ├─systemd-machined.service
             │ │ └─219024 /usr/lib/systemd/systemd-machined
             │ ├─systemd-udevd.service
             │ │ └─udev
             │ │   └─729 /usr/lib/systemd/systemd-udevd
             │ ├─tuned.service
             │ │ └─113383 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             │ ├─virtlogd.service
             │ │ └─218379 /usr/sbin/virtlogd
             │ ├─virtnodedevd.service
             │ │ └─252999 /usr/sbin/virtnodedevd --timeout 120
             │ └─virtqemud.service
             │   └─252362 /usr/sbin/virtqemud --timeout 120
             └─user.slice
               ├─user-1000.slice
               │ ├─session-1.scope
               │ │ └─4532 /usr/bin/python3
               │ ├─session-59.scope
               │ │ ├─291112 "sshd-session: zuul [priv]"
               │ │ ├─291116 "sshd-session: zuul@notty"
               │ │ ├─291117 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
               │ │ ├─291141 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
               │ │ ├─297141 timeout 15s turbostat --debug sleep 10
               │ │ ├─297545 timeout 300s systemctl status --all
               │ │ ├─297547 systemctl status --all
               │ │ ├─297570 timeout 300s ceph osd blocklist ls --format json-pretty
               │ │ ├─297571 /usr/bin/python3 -s /usr/bin/ceph osd blocklist ls --format json-pretty
               │ │ ├─297591 timeout 300s semanage boolean -l
               │ │ └─297592 /usr/bin/python3 -EsI /usr/sbin/semanage boolean -l
               │ └─user@1000.service
               │   ├─app.slice
               │   │ └─dbus-broker.service
               │   │   ├─13129 /usr/bin/dbus-broker-launch --scope user
               │   │   └─13141 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               │   ├─init.scope
               │   │ ├─4320 /usr/lib/systemd/systemd --user
               │   │ └─4322 "(sd-pam)"
               │   └─user.slice
               │     └─podman-pause-663ee478.scope
               │       └─13025 catatonit -P
               └─user-42477.slice
                 ├─session-37.scope
                 │ ├─100495 "sshd-session: ceph-admin [priv]"
                 │ └─100520 "sshd-session: ceph-admin@notty"
                 └─user@42477.service
                   └─init.scope
                     ├─93258 /usr/lib/systemd/systemd --user
                     └─93260 "(sd-pam)"

Feb 02 10:24:12 compute-0 systemd[1]: libpod-conmon-848dc93cda9557f239cd4c71f3a2a0105c0319e4ba7e14dc1ba3d399ae689142.scope: Deactivated successfully.
Feb 02 10:24:12 compute-0 systemd[1]: Started libpod-conmon-2a81d223795487067ed66f2d6b051fab157fee0d4360f4f93536cc39825dbbeb.scope.
Feb 02 10:24:12 compute-0 systemd[1]: Started libcrun container.
Feb 02 10:24:13 compute-0 systemd[1]: libpod-2a81d223795487067ed66f2d6b051fab157fee0d4360f4f93536cc39825dbbeb.scope: Deactivated successfully.
Feb 02 10:24:13 compute-0 systemd[1]: libpod-2a81d223795487067ed66f2d6b051fab157fee0d4360f4f93536cc39825dbbeb.scope: Consumed 1.138s CPU time.
Feb 02 10:24:13 compute-0 systemd[1]: var-lib-containers-storage-overlay-96fc210fd1a7d459dcdfa35fe58c22b8659262809c7e28b8d8a865e13479f766-merged.mount: Deactivated successfully.
Feb 02 10:24:13 compute-0 systemd[1]: libpod-conmon-2a81d223795487067ed66f2d6b051fab157fee0d4360f4f93536cc39825dbbeb.scope: Deactivated successfully.
Feb 02 10:24:27 compute-0 systemd[1]: Started Session 59 of User zuul.
Feb 02 10:24:47 compute-0 systemd[1]: Starting Hostname Service...
Feb 02 10:24:47 compute-0 systemd[1]: Started Hostname Service.

● machine.slice - Virtual Machine and Container Slice
     Loaded: loaded (/usr/lib/systemd/system/machine.slice; static)
     Active: active since Mon 2026-02-02 09:37:40 UTC; 47min ago
      Until: Mon 2026-02-02 09:37:40 UTC; 47min ago
       Docs: man:systemd.special(7)
         IO: 315.9M read, 60.1M written
      Tasks: 43
     Memory: 888.6M (peak: 1.1G)
        CPU: 5min 928ms
     CGroup: /machine.slice
             ├─libpod-29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8.scope
             │ └─container
             │   ├─165361 dumb-init --single-child -- kolla_start
             │   ├─165364 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │   ├─165809 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │   ├─166028 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp_26sdv4_/privsep.sock
             │   ├─257524 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpq3jugzpc/privsep.sock
             │   └─257582 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpjv70h06s/privsep.sock
             ├─libpod-46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3.scope
             │ └─container
             │   ├─252674 dumb-init --single-child -- kolla_start
             │   ├─252676 /usr/bin/python3 /usr/bin/nova-compute
             │   ├─257381 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmpwhhqa15o/privsep.sock
             │   └─258300 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context nova.privsep.sys_admin_pctxt --privsep_sock_path /tmp/tmp683yhkaj/privsep.sock
             └─libpod-e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36.scope
               └─container
                 ├─154633 dumb-init --single-child -- kolla_start
                 └─154636 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt

Feb 02 10:24:11 compute-0 flamboyant_lehmann[290710]:             },
Feb 02 10:24:11 compute-0 flamboyant_lehmann[290710]:             "type": "block",
Feb 02 10:24:11 compute-0 flamboyant_lehmann[290710]:             "vg_name": "ceph_vg0"
Feb 02 10:24:11 compute-0 flamboyant_lehmann[290710]:         }
Feb 02 10:24:11 compute-0 flamboyant_lehmann[290710]:     ]
Feb 02 10:24:11 compute-0 flamboyant_lehmann[290710]: }
Feb 02 10:24:11 compute-0 podman[290719]: 2026-02-02 10:24:11.366853305 +0000 UTC m=+0.036680254 container died 009f6b5509f53e901363d6ca6d2168d2cf02faedc683f9d5c9d28975dc5b3087 (image=quay.io/ceph/ceph@sha256:7c69e59beaeea61ca714e71cb84ff6d5e533db7f1fd84143dd9ba6649a5fd2ec, name=flamboyant_lehmann, CEPH_REF=squid, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=c92aebb279828e9c3c1f5d24613efca272649e62, io.buildah.version=1.40.1, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250325)
Feb 02 10:24:11 compute-0 podman[290719]: 2026-02-02 10:24:11.475121046 +0000 UTC m=+0.144947905 container remove 009f6b5509f53e901363d6ca6d2168d2cf02faedc683f9d5c9d28975dc5b3087 (image=quay.io/ceph/ceph@sha256:7c69e59beaeea61ca714e71cb84ff6d5e533db7f1fd84143dd9ba6649a5fd2ec, name=flamboyant_lehmann, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.40.1, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, CEPH_SHA1=c92aebb279828e9c3c1f5d24613efca272649e62, ceph=True, org.label-schema.build-date=20250325, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2)
Feb 02 10:24:12 compute-0 exciting_lamport[290844]: 167 167
Feb 02 10:24:13 compute-0 peaceful_lewin[290887]: {}

● system-ceph\x2dd241d473\x2d9fcb\x2d5f74\x2db163\x2df1ca4454e7f1.slice - Slice /system/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1
     Loaded: loaded
     Active: active since Mon 2026-02-02 09:37:43 UTC; 47min ago
      Until: Mon 2026-02-02 09:37:43 UTC; 47min ago
         IO: 225.2M read, 2.3G written
      Tasks: 1017
     Memory: 1.6G (peak: 1.6G)
        CPU: 5min 55.985s
     CGroup: /system.slice/system-ceph\x2dd241d473\x2d9fcb\x2d5f74\x2db163\x2df1ca4454e7f1.slice
             ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@alertmanager.compute-0.service
             │ ├─libpod-payload-63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4
             │ │ ├─104368 /run/podman-init -- /bin/alertmanager --cluster.listen-address=:9094 --web.listen-address=192.168.122.100:9093 --config.file=/etc/alertmanager/alertmanager.yml
             │ │ └─104370 /bin/alertmanager --cluster.listen-address=:9094 --web.listen-address=192.168.122.100:9093 --config.file=/etc/alertmanager/alertmanager.yml
             │ └─runtime
             │   └─104366 /usr/bin/conmon --api-version 1 -c 63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4 -u 63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4/userdata -p /run/containers/storage/overlay-containers/63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-alertmanager-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@alertmanager.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4
             ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@crash.compute-0.service
             │ ├─libpod-payload-318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2
             │ │ ├─79741 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ └─79743 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ └─runtime
             │   └─79739 /usr/bin/conmon --api-version 1 -c 318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2 -u 318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2/userdata -p /run/containers/storage/overlay-containers/318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2
             ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@grafana.compute-0.service
             │ ├─libpod-payload-207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2
             │ │ ├─104675 /run/podman-init -- /run.sh
             │ │ └─104677 grafana server --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini --packaging=docker cfg:default.log.mode=console cfg:default.paths.data=/var/lib/grafana cfg:default.paths.logs=/var/log/grafana cfg:default.paths.plugins=/var/lib/grafana/plugins cfg:default.paths.provisioning=/etc/grafana/provisioning
             │ └─runtime
             │   └─104673 /usr/bin/conmon --api-version 1 -c 207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2 -u 207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2/userdata -p /run/containers/storage/overlay-containers/207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-grafana-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@grafana.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2
             ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@haproxy.nfs.cephfs.compute-0.ooxkuo.service
             │ ├─libpod-payload-5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e
             │ │ ├─97798 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ │ ├─97800 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ └─97802 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ └─runtime
             │   └─97796 /usr/bin/conmon --api-version 1 -c 5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e -u 5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e/userdata -p /run/containers/storage/overlay-containers/5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-nfs-cephfs-compute-0-ooxkuo --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@haproxy.nfs.cephfs.compute-0.ooxkuo.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e
             ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@haproxy.rgw.default.compute-0.avekxu.service
             │ ├─libpod-payload-19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190
             │ │ ├─90405 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ │ ├─90407 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ └─90409 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ └─runtime
             │   └─90403 /usr/bin/conmon --api-version 1 -c 19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190 -u 19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190/userdata -p /run/containers/storage/overlay-containers/19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-rgw-default-compute-0-avekxu --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@haproxy.rgw.default.compute-0.avekxu.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190
             ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@keepalived.nfs.cephfs.compute-0.pqolko.service
             │ ├─libpod-payload-5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173
             │ │ ├─98165 /run/podman-init -- ./init.sh
             │ │ ├─98167 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ └─98169 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ └─runtime
             │   └─98163 /usr/bin/conmon --api-version 1 -c 5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173 -u 5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173/userdata -p /run/containers/storage/overlay-containers/5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-nfs-cephfs-compute-0-pqolko --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@keepalived.nfs.cephfs.compute-0.pqolko.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173
             ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@keepalived.rgw.default.compute-0.pxmjnp.service
             │ ├─libpod-payload-860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e
             │ │ ├─99765 /run/podman-init -- ./init.sh
             │ │ ├─99767 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ └─99769 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ └─runtime
             │   └─99763 /usr/bin/conmon --api-version 1 -c 860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e -u 860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e/userdata -p /run/containers/storage/overlay-containers/860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-rgw-default-compute-0-pxmjnp --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@keepalived.rgw.default.compute-0.pxmjnp.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e
             ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mds.cephfs.compute-0.clmmzw.service
             │ ├─libpod-payload-7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10
             │ │ ├─96759 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.clmmzw -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─96761 /usr/bin/ceph-mds -n mds.cephfs.compute-0.clmmzw -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─96757 /usr/bin/conmon --api-version 1 -c 7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10 -u 7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10/userdata -p /run/containers/storage/overlay-containers/7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-mds-cephfs-compute-0-clmmzw --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mds.cephfs.compute-0.clmmzw.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10
             ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mgr.compute-0.djvyfo.service
             │ ├─libpod-payload-3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc
             │ │ ├─74783 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.djvyfo -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─74785 /usr/bin/ceph-mgr -n mgr.compute-0.djvyfo -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─74781 /usr/bin/conmon --api-version 1 -c 3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc -u 3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc/userdata -p /run/containers/storage/overlay-containers/3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-mgr-compute-0-djvyfo --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mgr.compute-0.djvyfo.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc
             ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mon.compute-0.service
             │ ├─libpod-payload-79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783
             │ │ ├─74487 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─74489 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─runtime
             │   └─74485 /usr/bin/conmon --api-version 1 -c 79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783 -u 79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783/userdata -p /run/containers/storage/overlay-containers/79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783
             ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@nfs.cephfs.2.0.compute-0.fdwwab.service
             │ ├─libpod-payload-47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9
             │ │ ├─270910 /run/podman-init -- /usr/bin/ganesha.nfsd -F -L STDERR -N NIV_EVENT
             │ │ └─270912 /usr/bin/ganesha.nfsd -F -L STDERR -N NIV_EVENT
             │ └─runtime
             │   └─270907 /usr/bin/conmon --api-version 1 -c 47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9 -u 47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9/userdata -p /run/containers/storage/overlay-containers/47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-nfs-cephfs-2-0-compute-0-fdwwab --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@nfs.cephfs.2.0.compute-0.fdwwab.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9
             ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@node-exporter.compute-0.service
             │ ├─libpod-payload-690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998
             │ │ ├─104074 /run/podman-init -- /bin/node_exporter --no-collector.timex --web.listen-address=:9100 --path.procfs=/host/proc --path.sysfs=/host/sys --path.rootfs=/rootfs
             │ │ └─104076 /bin/node_exporter --no-collector.timex --web.listen-address=:9100 --path.procfs=/host/proc --path.sysfs=/host/sys --path.rootfs=/rootfs
             │ └─runtime
             │   └─104072 /usr/bin/conmon --api-version 1 -c 690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998 -u 690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998/userdata -p /run/containers/storage/overlay-containers/690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-node-exporter-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@node-exporter.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998
             ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@osd.1.service
             │ ├─libpod-payload-4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763
             │ │ ├─82703 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─82705 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─82701 /usr/bin/conmon --api-version 1 -c 4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763 -u 4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763/userdata -p /run/containers/storage/overlay-containers/4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763
             ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@prometheus.compute-0.service
             │ ├─libpod-payload-214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667
             │ │ ├─100307 /run/podman-init -- /bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=15d --storage.tsdb.retention.size=0 --web.external-url=http://compute-0.ctlplane.example.com:9095 --web.listen-address=192.168.122.100:9095
             │ │ └─100309 /bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=15d --storage.tsdb.retention.size=0 --web.external-url=http://compute-0.ctlplane.example.com:9095 --web.listen-address=192.168.122.100:9095
             │ └─runtime
             │   └─100305 /usr/bin/conmon --api-version 1 -c 214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667 -u 214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667/userdata -p /run/containers/storage/overlay-containers/214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-prometheus-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@prometheus.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667
             └─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@rgw.rgw.compute-0.vltabo.service
               ├─libpod-payload-2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f
               │ ├─89250 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.vltabo -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
               │ └─89254 /usr/bin/radosgw -n client.rgw.rgw.compute-0.vltabo -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
               └─runtime
                 └─89247 /usr/bin/conmon --api-version 1 -c 2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f -u 2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f/userdata -p /run/containers/storage/overlay-containers/2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-rgw-rgw-compute-0-vltabo --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@rgw.rgw.compute-0.vltabo.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f

Feb 02 10:25:06 compute-0 ceph-mgr[74785]: log_channel(cluster) log [DBG] : pgmap v1376: 353 pgs: 353 active+clean; 41 MiB data, 303 MiB used, 60 GiB / 60 GiB avail; 511 B/s rd, 0 op/s
Feb 02 10:25:06 compute-0 ceph-mgr[74785]: log_channel(audit) log [DBG] : from='client.29663 -' entity='client.admin' cmd=[{"prefix": "osd df", "output_method": "tree", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Feb 02 10:25:07 compute-0 ceph-mgr[74785]: log_channel(audit) log [DBG] : from='client.29626 -' entity='client.admin' cmd=[{"prefix": "osd blocked-by", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Feb 02 10:25:07 compute-0 ceph-mgr[74785]: log_channel(audit) log [DBG] : from='client.29681 -' entity='client.admin' cmd=[{"prefix": "osd df", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Feb 02 10:25:07 compute-0 ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-alertmanager-compute-0[104366]: ts=2026-02-02T10:25:07.274Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[2]: notify retry canceled after 2 attempts: Post \"http://compute-2.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded; ceph-dashboard/webhook[1]: notify retry canceled after 2 attempts: Post \"http://compute-1.ctlplane.example.com:8443/api/prometheus_receiver\": context deadline exceeded"
Feb 02 10:25:07 compute-0 radosgw[89254]: ====== starting new request req=0x7f123bf7e5d0 =====
Feb 02 10:25:07 compute-0 radosgw[89254]: ====== req done req=0x7f123bf7e5d0 op status=0 http_status=200 latency=0.000000000s ======
Feb 02 10:25:07 compute-0 radosgw[89254]: beast: 0x7f123bf7e5d0: 192.168.122.100 - anonymous [02/Feb/2026:10:25:07.408 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.000000000s
Feb 02 10:25:07 compute-0 ceph-mon[74489]: mon.compute-0@0(leader) e3 handle_command mon_command({"prefix": "osd blocklist ls", "format": "json-pretty"} v 0)
Feb 02 10:25:07 compute-0 ceph-mon[74489]: log_channel(audit) log [DBG] : from='client.? 192.168.122.100:0/1657061058' entity='client.admin' cmd=[{"prefix": "osd blocklist ls", "format": "json-pretty"}]: dispatch

● system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice - Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged
     Loaded: loaded
     Active: active since Mon 2026-02-02 09:54:30 UTC; 30min ago
      Until: Mon 2026-02-02 09:54:30 UTC; 30min ago
         IO: 4.0K read, 0B written
      Tasks: 0
     Memory: 8.0K (peak: 58.8M)
        CPU: 1.026s
     CGroup: /system.slice/system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice

Feb 02 09:54:30 compute-0 systemd[1]: Created slice Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged.

● system-getty.slice - Slice /system/getty
     Loaded: loaded
     Active: active since Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 232.0K (peak: 456.0K)
        CPU: 7ms
     CGroup: /system.slice/system-getty.slice
             └─getty@tty1.service
               └─1010 /sbin/agetty -o "-p -- \\u" --noclear - linux

● system-modprobe.slice - Slice /system/modprobe
     Loaded: loaded
     Active: active since Mon 2026-02-02 09:00:42 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:42 UTC; 1h 24min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 52.0K (peak: 12.0M)
        CPU: 154ms
     CGroup: /system.slice/system-modprobe.slice

Feb 02 09:00:42 localhost systemd[1]: Created slice Slice /system/modprobe.

● system-serial\x2dgetty.slice - Slice /system/serial-getty
     Loaded: loaded
     Active: active since Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 316.0K (peak: 572.0K)
        CPU: 8ms
     CGroup: /system.slice/system-serial\x2dgetty.slice
             └─serial-getty@ttyS0.service
               └─1011 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

● system-sshd\x2dkeygen.slice - Slice /system/sshd-keygen
     Loaded: loaded
     Active: active since Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 0B (peak: 0B)
        CPU: 0
     CGroup: /system.slice/system-sshd\x2dkeygen.slice

● system-systemd\x2dcoredump.slice - Slice /system/systemd-coredump
     Loaded: loaded
     Active: active since Mon 2026-02-02 09:45:02 UTC; 40min ago
      Until: Mon 2026-02-02 09:45:02 UTC; 40min ago
         IO: 1.1M read, 1.3G written
      Tasks: 0
     Memory: 4.8M (peak: 464.4M)
        CPU: 11.704s
     CGroup: /system.slice/system-systemd\x2dcoredump.slice

Feb 02 09:50:56 compute-0 systemd-coredump[166480]: Process 164003 (ganesha.nfsd) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 42:
                                                    #0  0x00007f9a01a8932e n/a (/usr/lib64/libntirpc.so.5.8 + 0x2232e)
                                                    ELF object binary architecture: AMD x86-64
Feb 02 09:51:43 compute-0 systemd-coredump[173074]: Process 168354 (ganesha.nfsd) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 45:
                                                    #0  0x00007f277f18532e n/a (/usr/lib64/libntirpc.so.5.8 + 0x2232e)
                                                    ELF object binary architecture: AMD x86-64
Feb 02 09:52:22 compute-0 systemd-coredump[175718]: Process 173945 (ganesha.nfsd) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 43:
                                                    #0  0x00007f557d42c32e n/a (/usr/lib64/libntirpc.so.5.8 + 0x2232e)
                                                    ELF object binary architecture: AMD x86-64
Feb 02 09:53:33 compute-0 systemd-coredump[205756]: Process 184987 (ganesha.nfsd) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 41:
                                                    #0  0x00007fb66ee3b32e n/a (/usr/lib64/libntirpc.so.5.8 + 0x2232e)
                                                    ELF object binary architecture: AMD x86-64
Feb 02 09:54:05 compute-0 systemd-coredump[213244]: Process 208392 (ganesha.nfsd) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 41:
                                                    #0  0x00007f795683032e n/a (/usr/lib64/libntirpc.so.5.8 + 0x2232e)
                                                    ELF object binary architecture: AMD x86-64
Feb 02 09:55:57 compute-0 systemd-coredump[232235]: Process 216307 (ganesha.nfsd) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 58:
                                                    #0  0x00007fc3aae2f32e n/a (/usr/lib64/libntirpc.so.5.8 + 0x2232e)
                                                    ELF object binary architecture: AMD x86-64
Feb 02 10:03:39 compute-0 systemd-coredump[258626]: Process 234639 (ganesha.nfsd) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 75:
                                                    #0  0x00007f212ace632e n/a (/usr/lib64/libntirpc.so.5.8 + 0x2232e)
                                                    ELF object binary architecture: AMD x86-64
Feb 02 10:04:19 compute-0 systemd-coredump[260015]: Process 258787 (ganesha.nfsd) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 46:
                                                    #0  0x00007f58af33d32e n/a (/usr/lib64/libntirpc.so.5.8 + 0x2232e)
                                                    ELF object binary architecture: AMD x86-64
Feb 02 10:07:37 compute-0 systemd-coredump[265060]: Process 260466 (ganesha.nfsd) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 63:
                                                    #0  0x00007f24afd3532e n/a (/usr/lib64/libntirpc.so.5.8 + 0x2232e)
                                                    ELF object binary architecture: AMD x86-64
Feb 02 10:11:44 compute-0 systemd-coredump[270555]: Process 265216 (ganesha.nfsd) of user 0 dumped core.
                                                    
                                                    Stack trace of thread 67:
                                                    #0  0x00007f282686b32e n/a (/usr/lib64/libntirpc.so.5.8 + 0x2232e)
                                                    ELF object binary architecture: AMD x86-64

● system.slice - System Slice
     Loaded: loaded
     Active: active since Mon 2026-02-02 09:00:41 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:41 UTC; 1h 24min ago
       Docs: man:systemd.special(7)
         IO: 372.1M read, 3.7G written
      Tasks: 1129
     Memory: 2.2G (peak: 2.6G)
        CPU: 10min 36.219s
     CGroup: /system.slice
             ├─NetworkManager.service
             │ └─49024 /usr/sbin/NetworkManager --no-daemon
             ├─auditd.service
             │ ├─701 /sbin/auditd
             │ └─703 /usr/sbin/sedispatch
             ├─chronyd.service
             │ └─58590 /usr/sbin/chronyd -F 2
             ├─crond.service
             │ └─1009 /usr/sbin/crond -n
             ├─dbus-broker.service
             │ ├─772 /usr/bin/dbus-broker-launch --scope system --audit
             │ └─780 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             ├─edpm_nova_compute.service
             │ └─252672 /usr/bin/conmon --api-version 1 -c 46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3 -u 46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3/userdata -p /run/containers/storage/overlay-containers/46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 46ea9bbd396be6c9cf42e800f6ecffbc7ecf6b0a7dd6f731e2744db95bbe8ea3
             ├─edpm_ovn_controller.service
             │ └─154631 /usr/bin/conmon --api-version 1 -c e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36 -u e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36/userdata -p /run/containers/storage/overlay-containers/e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36
             ├─edpm_ovn_metadata_agent.service
             │ └─165359 /usr/bin/conmon --api-version 1 -c 29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8 -u 29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8/userdata -p /run/containers/storage/overlay-containers/29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8
             ├─gssproxy.service
             │ └─879 /usr/sbin/gssproxy -D
             ├─irqbalance.service
             │ └─788 /usr/sbin/irqbalance
             ├─iscsid.service
             │ └─236646 /usr/sbin/iscsid -f
             ├─multipathd.service
             │ └─236888 /sbin/multipathd -d -s
             ├─ovs-vswitchd.service
             │ └─47331 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             ├─ovsdb-server.service
             │ └─47250 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             ├─polkit.service
             │ └─43515 /usr/lib/polkit-1/polkitd --no-debug
             ├─rpcbind.service
             │ └─699 /usr/bin/rpcbind -w -f
             ├─rsyslog.service
             │ └─1005 /usr/sbin/rsyslogd -n
             ├─sshd.service
             │ └─192944 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             ├─system-ceph\x2dd241d473\x2d9fcb\x2d5f74\x2db163\x2df1ca4454e7f1.slice
             │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@alertmanager.compute-0.service
             │ │ ├─libpod-payload-63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4
             │ │ │ ├─104368 /run/podman-init -- /bin/alertmanager --cluster.listen-address=:9094 --web.listen-address=192.168.122.100:9093 --config.file=/etc/alertmanager/alertmanager.yml
             │ │ │ └─104370 /bin/alertmanager --cluster.listen-address=:9094 --web.listen-address=192.168.122.100:9093 --config.file=/etc/alertmanager/alertmanager.yml
             │ │ └─runtime
             │ │   └─104366 /usr/bin/conmon --api-version 1 -c 63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4 -u 63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4/userdata -p /run/containers/storage/overlay-containers/63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-alertmanager-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@alertmanager.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 63a3970896ab11bd3cbece1b971a05452b0bd8a5b643f0eac52d5b3639ab19c4
             │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@crash.compute-0.service
             │ │ ├─libpod-payload-318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2
             │ │ │ ├─79741 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ └─79743 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ └─runtime
             │ │   └─79739 /usr/bin/conmon --api-version 1 -c 318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2 -u 318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2/userdata -p /run/containers/storage/overlay-containers/318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 318ef38b81cae6eaaebf216bc863b04a4bef5216fba3bfba4e81b73ac8904bc2
             │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@grafana.compute-0.service
             │ │ ├─libpod-payload-207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2
             │ │ │ ├─104675 /run/podman-init -- /run.sh
             │ │ │ └─104677 grafana server --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini --packaging=docker cfg:default.log.mode=console cfg:default.paths.data=/var/lib/grafana cfg:default.paths.logs=/var/log/grafana cfg:default.paths.plugins=/var/lib/grafana/plugins cfg:default.paths.provisioning=/etc/grafana/provisioning
             │ │ └─runtime
             │ │   └─104673 /usr/bin/conmon --api-version 1 -c 207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2 -u 207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2/userdata -p /run/containers/storage/overlay-containers/207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-grafana-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@grafana.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 207575e5b32cec4e058e0ca4f48bad94c6e447fc9aa765a0aa8117f4604125b2
             │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@haproxy.nfs.cephfs.compute-0.ooxkuo.service
             │ │ ├─libpod-payload-5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e
             │ │ │ ├─97798 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ │ │ ├─97800 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ │ └─97802 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ └─runtime
             │ │   └─97796 /usr/bin/conmon --api-version 1 -c 5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e -u 5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e/userdata -p /run/containers/storage/overlay-containers/5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-nfs-cephfs-compute-0-ooxkuo --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@haproxy.nfs.cephfs.compute-0.ooxkuo.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5812768ed72c0881a5b563a239565cde81bb05b6f1e1beebab3f203681cce03e
             │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@haproxy.rgw.default.compute-0.avekxu.service
             │ │ ├─libpod-payload-19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190
             │ │ │ ├─90405 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ │ │ ├─90407 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ │ └─90409 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ └─runtime
             │ │   └─90403 /usr/bin/conmon --api-version 1 -c 19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190 -u 19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190/userdata -p /run/containers/storage/overlay-containers/19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-haproxy-rgw-default-compute-0-avekxu --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@haproxy.rgw.default.compute-0.avekxu.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 19feecaa7fcd517b2bfb973fc8fcf623ad4b0956f16c53292d24fe12f4780190
             │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@keepalived.nfs.cephfs.compute-0.pqolko.service
             │ │ ├─libpod-payload-5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173
             │ │ │ ├─98165 /run/podman-init -- ./init.sh
             │ │ │ ├─98167 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ │ └─98169 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ └─runtime
             │ │   └─98163 /usr/bin/conmon --api-version 1 -c 5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173 -u 5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173/userdata -p /run/containers/storage/overlay-containers/5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-nfs-cephfs-compute-0-pqolko --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@keepalived.nfs.cephfs.compute-0.pqolko.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 5f2bbf7994e4a92479e9d1b19dfd01c3876abee624a9d2019b778a31c38bb173
             │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@keepalived.rgw.default.compute-0.pxmjnp.service
             │ │ ├─libpod-payload-860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e
             │ │ │ ├─99765 /run/podman-init -- ./init.sh
             │ │ │ ├─99767 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ │ └─99769 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ └─runtime
             │ │   └─99763 /usr/bin/conmon --api-version 1 -c 860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e -u 860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e/userdata -p /run/containers/storage/overlay-containers/860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-keepalived-rgw-default-compute-0-pxmjnp --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@keepalived.rgw.default.compute-0.pxmjnp.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 860a76d66e8eda9eaa418ca2983e711afeb6dee68d75c8d8ff31ac03764f810e
             │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mds.cephfs.compute-0.clmmzw.service
             │ │ ├─libpod-payload-7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10
             │ │ │ ├─96759 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.clmmzw -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─96761 /usr/bin/ceph-mds -n mds.cephfs.compute-0.clmmzw -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─96757 /usr/bin/conmon --api-version 1 -c 7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10 -u 7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10/userdata -p /run/containers/storage/overlay-containers/7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-mds-cephfs-compute-0-clmmzw --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mds.cephfs.compute-0.clmmzw.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 7adc20511c464cc8d1a3fc124e35d564818f3952b55827abfa6fd9e805055a10
             │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mgr.compute-0.djvyfo.service
             │ │ ├─libpod-payload-3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc
             │ │ │ ├─74783 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.djvyfo -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─74785 /usr/bin/ceph-mgr -n mgr.compute-0.djvyfo -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─74781 /usr/bin/conmon --api-version 1 -c 3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc -u 3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc/userdata -p /run/containers/storage/overlay-containers/3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-mgr-compute-0-djvyfo --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mgr.compute-0.djvyfo.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3dfd19b9ab30bf136f4a18ad3b4a13ee303004a583ad880116709be18eec72dc
             │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mon.compute-0.service
             │ │ ├─libpod-payload-79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783
             │ │ │ ├─74487 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─74489 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─runtime
             │ │   └─74485 /usr/bin/conmon --api-version 1 -c 79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783 -u 79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783/userdata -p /run/containers/storage/overlay-containers/79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 79ef7165b184aa21ab9e464efe33891b6304e1ba848414549f299d1b301d6783
             │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@nfs.cephfs.2.0.compute-0.fdwwab.service
             │ │ ├─libpod-payload-47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9
             │ │ │ ├─270910 /run/podman-init -- /usr/bin/ganesha.nfsd -F -L STDERR -N NIV_EVENT
             │ │ │ └─270912 /usr/bin/ganesha.nfsd -F -L STDERR -N NIV_EVENT
             │ │ └─runtime
             │ │   └─270907 /usr/bin/conmon --api-version 1 -c 47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9 -u 47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9/userdata -p /run/containers/storage/overlay-containers/47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-nfs-cephfs-2-0-compute-0-fdwwab --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@nfs.cephfs.2.0.compute-0.fdwwab.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 47ffd521b52a4e817e05a876d9da3b01cdfdfeae11aa3098649e39241d4ffff9
             │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@node-exporter.compute-0.service
             │ │ ├─libpod-payload-690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998
             │ │ │ ├─104074 /run/podman-init -- /bin/node_exporter --no-collector.timex --web.listen-address=:9100 --path.procfs=/host/proc --path.sysfs=/host/sys --path.rootfs=/rootfs
             │ │ │ └─104076 /bin/node_exporter --no-collector.timex --web.listen-address=:9100 --path.procfs=/host/proc --path.sysfs=/host/sys --path.rootfs=/rootfs
             │ │ └─runtime
             │ │   └─104072 /usr/bin/conmon --api-version 1 -c 690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998 -u 690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998/userdata -p /run/containers/storage/overlay-containers/690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-node-exporter-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@node-exporter.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 690ade5beb0aa03a99cf3b4b1da5291c57988034f4a9506f786da5a6fb824998
             │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@osd.1.service
             │ │ ├─libpod-payload-4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763
             │ │ │ ├─82703 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─82705 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─82701 /usr/bin/conmon --api-version 1 -c 4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763 -u 4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763/userdata -p /run/containers/storage/overlay-containers/4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 4642ed65ea9037166532825913a80a3f5fa996c66d25a8d6ec32643bd7f52763
             │ ├─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@prometheus.compute-0.service
             │ │ ├─libpod-payload-214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667
             │ │ │ ├─100307 /run/podman-init -- /bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=15d --storage.tsdb.retention.size=0 --web.external-url=http://compute-0.ctlplane.example.com:9095 --web.listen-address=192.168.122.100:9095
             │ │ │ └─100309 /bin/prometheus --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=15d --storage.tsdb.retention.size=0 --web.external-url=http://compute-0.ctlplane.example.com:9095 --web.listen-address=192.168.122.100:9095
             │ │ └─runtime
             │ │   └─100305 /usr/bin/conmon --api-version 1 -c 214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667 -u 214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667/userdata -p /run/containers/storage/overlay-containers/214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-prometheus-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@prometheus.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 214561532da1ee185d9ab0bf03f5b2d46320266c11cddad43265e8842ed9d667
             │ └─ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@rgw.rgw.compute-0.vltabo.service
             │   ├─libpod-payload-2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f
             │   │ ├─89250 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.vltabo -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │   │ └─89254 /usr/bin/radosgw -n client.rgw.rgw.compute-0.vltabo -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │   └─runtime
             │     └─89247 /usr/bin/conmon --api-version 1 -c 2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f -u 2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f/userdata -p /run/containers/storage/overlay-containers/2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f/userdata/pidfile -n ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1-rgw-rgw-compute-0-vltabo --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f/userdata/oci-log --conmon-pidfile /run/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1@rgw.rgw.compute-0.vltabo.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 2dea2149c1564b5d8c5d4eba925c3597c22745b2ed907d0270e5352d9fbd157f
             ├─system-getty.slice
             │ └─getty@tty1.service
             │   └─1010 /sbin/agetty -o "-p -- \\u" --noclear - linux
             ├─system-serial\x2dgetty.slice
             │ └─serial-getty@ttyS0.service
             │   └─1011 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             ├─systemd-hostnamed.service
             │ └─293546 /usr/lib/systemd/systemd-hostnamed
             ├─systemd-journald.service
             │ └─677 /usr/lib/systemd/systemd-journald
             ├─systemd-logind.service
             │ └─793 /usr/lib/systemd/systemd-logind
             ├─systemd-machined.service
             │ └─219024 /usr/lib/systemd/systemd-machined
             ├─systemd-udevd.service
             │ └─udev
             │   └─729 /usr/lib/systemd/systemd-udevd
             ├─tuned.service
             │ └─113383 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             ├─virtlogd.service
             │ └─218379 /usr/sbin/virtlogd
             ├─virtnodedevd.service
             │ └─252999 /usr/sbin/virtnodedevd --timeout 120
             └─virtqemud.service
               └─252362 /usr/sbin/virtqemud --timeout 120

Feb 02 10:24:48 compute-0 nova_compute[252672]: 2026-02-02 10:24:48.169 252676 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 10:24:48 compute-0 nova_compute[252672]: 2026-02-02 10:24:48.719 252676 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 10:24:53 compute-0 nova_compute[252672]: 2026-02-02 10:24:53.173 252676 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 10:24:53 compute-0 podman[294442]: 2026-02-02 10:24:53.375128539 +0000 UTC m=+0.089392181 container health_status e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller@sha256:099d88ae13fa2b3409da5310cdcba7fa01d2c87a8bc98296299a57054b9a075e, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, config_id=ovn_controller, org.label-schema.vendor=CentOS, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': 'db4758ee7523fe447444c4bd2b867b543b1eee4e3bbcf6676cd1b27bf6147d86-65dc6d8b666d074a9e865f271939acafedbf905c57c15ae47c3f2766afb95121-65dc6d8b666d074a9e865f271939acafedbf905c57c15ae47c3f2766afb95121-65dc6d8b666d074a9e865f271939acafedbf905c57c15ae47c3f2766afb95121'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller@sha256:099d88ae13fa2b3409da5310cdcba7fa01d2c87a8bc98296299a57054b9a075e', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.build-date=20260127, tcib_managed=true, container_name=ovn_controller, maintainer=OpenStack Kubernetes Operator 
team, managed_by=edpm_ansible, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4)
Feb 02 10:24:53 compute-0 nova_compute[252672]: 2026-02-02 10:24:53.721 252676 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 10:24:58 compute-0 nova_compute[252672]: 2026-02-02 10:24:58.213 252676 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 10:24:58 compute-0 nova_compute[252672]: 2026-02-02 10:24:58.724 252676 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 10:25:01 compute-0 podman[296714]: 2026-02-02 10:25:01.343555891 +0000 UTC m=+0.055312007 container health_status 29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:7c305a77ab65247f0dc2ea1616c427b173cb95f37bb37e34c631d9615a73d2cc, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, tcib_build_tag=b85d0548925081ae8c6bdd697658cec4, config_id=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': 'db4758ee7523fe447444c4bd2b867b543b1eee4e3bbcf6676cd1b27bf6147d86-65dc6d8b666d074a9e865f271939acafedbf905c57c15ae47c3f2766afb95121-65dc6d8b666d074a9e865f271939acafedbf905c57c15ae47c3f2766afb95121-65dc6d8b666d074a9e865f271939acafedbf905c57c15ae47c3f2766afb95121-0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:7c305a77ab65247f0dc2ea1616c427b173cb95f37bb37e34c631d9615a73d2cc', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/openstack/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', 
'/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, container_name=ovn_metadata_agent, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, managed_by=edpm_ansible, org.label-schema.build-date=20260127, tcib_managed=true, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Feb 02 10:25:03 compute-0 nova_compute[252672]: 2026-02-02 10:25:03.217 252676 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 02 10:25:03 compute-0 nova_compute[252672]: 2026-02-02 10:25:03.726 252676 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 24 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m

● user-1000.slice - User Slice of UID 1000
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Mon 2026-02-02 09:01:14 UTC; 1h 23min ago
      Until: Mon 2026-02-02 09:01:14 UTC; 1h 23min ago
       Docs: man:user@.service(5)
         IO: 638.8M read, 8.2G written
      Tasks: 25 (limit: 20031)
     Memory: 3.1G (peak: 4.2G)
        CPU: 22min 41.656s
     CGroup: /user.slice/user-1000.slice
             ├─session-1.scope
             │ └─4532 /usr/bin/python3
             ├─session-59.scope
             │ ├─291112 "sshd-session: zuul [priv]"
             │ ├─291116 "sshd-session: zuul@notty"
             │ ├─291117 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ ├─291141 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ ├─297141 timeout 15s turbostat --debug sleep 10
             │ ├─297545 timeout 300s systemctl status --all
             │ ├─297547 systemctl status --all
             │ ├─297591 timeout 300s semanage boolean -l
             │ ├─297592 /usr/bin/python3 -EsI /usr/sbin/semanage boolean -l
             │ ├─297595 timeout 300s ceph osd df tree --format json-pretty
             │ └─297596 /usr/bin/python3 -s /usr/bin/ceph osd df tree --format json-pretty
             └─user@1000.service
               ├─app.slice
               │ └─dbus-broker.service
               │   ├─13129 /usr/bin/dbus-broker-launch --scope user
               │   └─13141 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               ├─init.scope
               │ ├─4320 /usr/lib/systemd/systemd --user
               │ └─4322 "(sd-pam)"
               └─user.slice
                 └─podman-pause-663ee478.scope
                   └─13025 catatonit -P

Feb 02 10:15:46 compute-0 sshd-session[282329]: Disconnected from user zuul 192.168.122.10 port 51192
Feb 02 10:15:47 compute-0 sudo[282359]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/rm -rf /var/tmp/sos-osp
Feb 02 10:15:47 compute-0 sudo[282359]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Feb 02 10:15:47 compute-0 sudo[282359]: pam_unix(sudo:session): session closed for user root
Feb 02 10:15:47 compute-0 sshd-session[282358]: Received disconnect from 192.168.122.10 port 51194:11: disconnected by user
Feb 02 10:15:47 compute-0 sshd-session[282358]: Disconnected from user zuul 192.168.122.10 port 51194
Feb 02 10:24:27 compute-0 sudo[291117]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Feb 02 10:24:27 compute-0 sudo[291117]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Feb 02 10:24:43 compute-0 crontab[293123]: (root) LIST (root)
Feb 02 10:24:59 compute-0 ovs-appctl[295930]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

● user-42477.slice - User Slice of UID 42477
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Mon 2026-02-02 09:40:48 UTC; 44min ago
      Until: Mon 2026-02-02 09:40:48 UTC; 44min ago
       Docs: man:user@.service(5)
         IO: 28.0K read, 1.0G written
      Tasks: 4 (limit: 20031)
     Memory: 226.2M (peak: 1.1G)
        CPU: 4min 8.293s
     CGroup: /user.slice/user-42477.slice
             ├─session-37.scope
             │ ├─100495 "sshd-session: ceph-admin [priv]"
             │ └─100520 "sshd-session: ceph-admin@notty"
             └─user@42477.service
               └─init.scope
                 ├─93258 /usr/lib/systemd/systemd --user
                 └─93260 "(sd-pam)"

Feb 02 10:24:13 compute-0 sudo[290977]: pam_unix(sudo:session): session closed for user root
Feb 02 10:24:23 compute-0 sudo[291083]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Feb 02 10:24:23 compute-0 sudo[291083]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 10:24:23 compute-0 sudo[291083]: pam_unix(sudo:session): session closed for user root
Feb 02 10:24:43 compute-0 sudo[293064]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Feb 02 10:24:43 compute-0 sudo[293064]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 10:24:43 compute-0 sudo[293064]: pam_unix(sudo:session): session closed for user root
Feb 02 10:25:03 compute-0 sudo[297283]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Feb 02 10:25:03 compute-0 sudo[297283]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Feb 02 10:25:03 compute-0 sudo[297283]: pam_unix(sudo:session): session closed for user root

● user.slice - User and Session Slice
     Loaded: loaded (/usr/lib/systemd/system/user.slice; static)
     Active: active since Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
       Docs: man:systemd.special(7)
         IO: 638.9M read, 9.4G written
      Tasks: 29
     Memory: 3.4G (peak: 4.9G)
        CPU: 27min 25.892s
     CGroup: /user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4532 /usr/bin/python3
             │ ├─session-59.scope
             │ │ ├─291112 "sshd-session: zuul [priv]"
             │ │ ├─291116 "sshd-session: zuul@notty"
             │ │ ├─291117 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─291141 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─297141 timeout 15s turbostat --debug sleep 10
             │ │ ├─297545 timeout 300s systemctl status --all
             │ │ ├─297547 systemctl status --all
             │ │ ├─297591 timeout 300s semanage boolean -l
             │ │ ├─297592 /usr/bin/python3 -EsI /usr/sbin/semanage boolean -l
             │ │ ├─297595 timeout 300s ceph osd df tree --format json-pretty
             │ │ └─297596 /usr/bin/python3 -s /usr/bin/ceph osd df tree --format json-pretty
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─13129 /usr/bin/dbus-broker-launch --scope user
             │   │   └─13141 dbus-broker --log 4 --controller 9 --machine-id bf0bc0bb03de29b24cba1cc9599cf5d0 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4320 /usr/lib/systemd/systemd --user
             │   │ └─4322 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-663ee478.scope
             │       └─13025 catatonit -P
             └─user-42477.slice
               ├─session-37.scope
               │ ├─100495 "sshd-session: ceph-admin [priv]"
               │ └─100520 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─93258 /usr/lib/systemd/systemd --user
                   └─93260 "(sd-pam)"

● dbus.socket - D-Bus System Message Bus Socket
     Loaded: loaded (/usr/lib/systemd/system/dbus.socket; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
   Triggers: ● dbus-broker.service
     Listen: /run/dbus/system_bus_socket (Stream)
     CGroup: /system.slice/dbus.socket

Feb 02 09:00:45 localhost systemd[1]: Listening on D-Bus System Message Bus Socket.

● dm-event.socket - Device-mapper event daemon FIFOs
     Loaded: loaded (/usr/lib/systemd/system/dm-event.socket; enabled; preset: enabled)
     Active: active (listening) since Mon 2026-02-02 09:31:14 UTC; 53min ago
      Until: Mon 2026-02-02 09:31:14 UTC; 53min ago
   Triggers: ● dm-event.service
       Docs: man:dmeventd(8)
     Listen: /run/dmeventd-server (FIFO)
             /run/dmeventd-client (FIFO)
     CGroup: /system.slice/dm-event.socket

Feb 02 09:31:14 compute-0 systemd[1]: Listening on Device-mapper event daemon FIFOs.

● iscsid.socket - Open-iSCSI iscsid Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsid.socket; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 09:55:37 UTC; 29min ago
      Until: Mon 2026-02-02 09:55:37 UTC; 29min ago
   Triggers: ● iscsid.service
       Docs: man:iscsid(8)
             man:iscsiadm(8)
     Listen: @ISCSIADM_ABSTRACT_NAMESPACE (Stream)
     CGroup: /system.slice/iscsid.socket

Feb 02 09:55:37 compute-0 systemd[1]: Listening on Open-iSCSI iscsid Socket.

○ iscsiuio.socket - Open-iSCSI iscsiuio Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● iscsiuio.service
       Docs: man:iscsiuio(8)
     Listen: @ISCSID_UIP_ABSTRACT_NAMESPACE (Stream)

○ libvirtd-admin.socket - libvirt legacy monolithic daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-admin.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)

○ libvirtd-ro.socket - libvirt legacy monolithic daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-ro.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)

○ libvirtd-tls.socket
     Loaded: masked (Reason: Unit libvirtd-tls.socket is masked.)
     Active: inactive (dead)

○ libvirtd.socket - libvirt legacy monolithic daemon socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock (Stream)

● lvm2-lvmpolld.socket - LVM2 poll daemon socket
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.socket; enabled; preset: enabled)
     Active: active (listening) since Mon 2026-02-02 09:31:15 UTC; 53min ago
      Until: Mon 2026-02-02 09:31:15 UTC; 53min ago
   Triggers: ● lvm2-lvmpolld.service
       Docs: man:lvmpolld(8)
     Listen: /run/lvm/lvmpolld.socket (Stream)
     CGroup: /system.slice/lvm2-lvmpolld.socket

Feb 02 09:31:15 compute-0 systemd[1]: Listening on LVM2 poll daemon socket.

● multipathd.socket - multipathd control socket
     Loaded: loaded (/usr/lib/systemd/system/multipathd.socket; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-02 09:56:04 UTC; 29min ago
      Until: Mon 2026-02-02 09:56:04 UTC; 29min ago
   Triggers: ● multipathd.service
     Listen: @/org/kernel/linux/storage/multipathd (Stream)
             /run/multipathd.socket (Stream)
     CGroup: /system.slice/multipathd.socket

Feb 02 09:56:04 compute-0 systemd[1]: Listening on multipathd control socket.

● rpcbind.socket - RPCbind Server Activation Socket
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.socket; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
   Triggers: ● rpcbind.service
     Listen: /run/rpcbind.sock (Stream)
             0.0.0.0:111 (Stream)
             0.0.0.0:111 (Datagram)
             [::]:111 (Stream)
             [::]:111 (Datagram)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 16.0K (peak: 288.0K)
        CPU: 4ms
     CGroup: /system.slice/rpcbind.socket

● sssd-kcm.socket - SSSD Kerberos Cache Manager responder socket
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.socket; enabled; preset: enabled)
     Active: active (listening) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
   Triggers: ● sssd-kcm.service
       Docs: man:sssd-kcm(8)
     Listen: /run/.heim_org.h5l.kcm-socket (Stream)
     CGroup: /system.slice/sssd-kcm.socket

Feb 02 09:00:45 localhost systemd[1]: Listening on SSSD Kerberos Cache Manager responder socket.

○ syslog.socket - Syslog Socket
     Loaded: loaded (/usr/lib/systemd/system/syslog.socket; static)
     Active: inactive (dead)
   Triggers: ● syslog.service
       Docs: man:systemd.special(7)
             https://www.freedesktop.org/wiki/Software/systemd/syslog
     Listen: /run/systemd/journal/syslog (Datagram)

● systemd-coredump.socket - Process Core Dump Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-coredump.socket; static)
     Active: active (listening) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd-coredump(8)
     Listen: /run/systemd/coredump (SequentialPacket)
   Accepted: 13; Connected: 0;
     CGroup: /system.slice/systemd-coredump.socket

● systemd-initctl.socket - initctl Compatibility Named Pipe
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.socket; static)
     Active: active (listening) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
   Triggers: ● systemd-initctl.service
       Docs: man:systemd-initctl.socket(8)
     Listen: /run/initctl (FIFO)
     CGroup: /system.slice/systemd-initctl.socket

● systemd-journald-dev-log.socket - Journal Socket (/dev/log)
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald-dev-log.socket; static)
     Active: active (running) since Mon 2026-02-02 09:00:41 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:41 UTC; 1h 24min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/dev-log (Datagram)
     CGroup: /system.slice/systemd-journald-dev-log.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-journald.socket - Journal Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.socket; static)
     Active: active (running) since Mon 2026-02-02 09:00:41 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:41 UTC; 1h 24min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/socket (Datagram)
             /run/systemd/journal/stdout (Stream)
     CGroup: /system.slice/systemd-journald.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-rfkill.socket - Load/Save RF Kill Switch Status /dev/rfkill Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.socket; static)
     Active: active (listening) since Mon 2026-02-02 09:00:47 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:47 UTC; 1h 24min ago
   Triggers: ● systemd-rfkill.service
       Docs: man:systemd-rfkill.socket(8)
     Listen: /dev/rfkill (Special)
     CGroup: /system.slice/systemd-rfkill.socket

Feb 02 09:00:47 np0005604790.novalocal systemd[1]: Listening on Load/Save RF Kill Switch Status /dev/rfkill Watch.

● systemd-udevd-control.socket - udev Control Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-control.socket; static)
     Active: active (running) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-control.socket(8)
             man:udev(7)
     Listen: /run/udev/control (SequentialPacket)
     CGroup: /system.slice/systemd-udevd-control.socket

● systemd-udevd-kernel.socket - udev Kernel Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-kernel.socket; static)
     Active: active (running) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-kernel.socket(8)
             man:udev(7)
     Listen: kobject-uevent 1 (Netlink)
     CGroup: /system.slice/systemd-udevd-kernel.socket

○ virtinterfaced-admin.socket - libvirt interface daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-admin-sock (Stream)

○ virtinterfaced-ro.socket - libvirt interface daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock-ro (Stream)

○ virtinterfaced.socket - libvirt interface daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock (Stream)

○ virtlockd-admin.socket - libvirt locking daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-admin-sock (Stream)

● virtlockd.socket - libvirt locking daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.socket; enabled; preset: enabled)
     Active: active (listening) since Mon 2026-02-02 09:54:31 UTC; 30min ago
      Until: Mon 2026-02-02 09:54:31 UTC; 30min ago
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-sock (Stream)
     CGroup: /system.slice/virtlockd.socket

Feb 02 09:54:31 compute-0 systemd[1]: Listening on libvirt locking daemon socket.

● virtlogd-admin.socket - libvirt logging daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd-admin.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 09:54:28 UTC; 30min ago
      Until: Mon 2026-02-02 09:54:28 UTC; 30min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtlogd-admin.socket

Feb 02 09:54:28 compute-0 systemd[1]: Starting libvirt logging daemon admin socket...
Feb 02 09:54:28 compute-0 systemd[1]: Listening on libvirt logging daemon admin socket.

● virtlogd.socket - libvirt logging daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 09:54:28 UTC; 30min ago
      Until: Mon 2026-02-02 09:54:28 UTC; 30min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtlogd.socket

Feb 02 09:54:28 compute-0 systemd[1]: Starting libvirt logging daemon socket...
Feb 02 09:54:28 compute-0 systemd[1]: Listening on libvirt logging daemon socket.

○ virtnetworkd-admin.socket - libvirt network daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-admin-sock (Stream)

○ virtnetworkd-ro.socket - libvirt network daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock-ro (Stream)

○ virtnetworkd.socket - libvirt network daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock (Stream)

● virtnodedevd-admin.socket - libvirt nodedev daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-admin.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 09:54:29 UTC; 30min ago
      Until: Mon 2026-02-02 09:54:29 UTC; 30min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 5ms
     CGroup: /system.slice/virtnodedevd-admin.socket

Feb 02 09:54:29 compute-0 systemd[1]: Starting libvirt nodedev daemon admin socket...
Feb 02 09:54:29 compute-0 systemd[1]: Listening on libvirt nodedev daemon admin socket.

● virtnodedevd-ro.socket - libvirt nodedev daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-ro.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 09:54:29 UTC; 30min ago
      Until: Mon 2026-02-02 09:54:29 UTC; 30min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtnodedevd-ro.socket

Feb 02 09:54:29 compute-0 systemd[1]: Starting libvirt nodedev daemon read-only socket...
Feb 02 09:54:29 compute-0 systemd[1]: Listening on libvirt nodedev daemon read-only socket.

● virtnodedevd.socket - libvirt nodedev daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 09:54:29 UTC; 30min ago
      Until: Mon 2026-02-02 09:54:29 UTC; 30min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtnodedevd.socket

Feb 02 09:54:29 compute-0 systemd[1]: Starting libvirt nodedev daemon socket...
Feb 02 09:54:29 compute-0 systemd[1]: Listening on libvirt nodedev daemon socket.

○ virtnwfilterd-admin.socket - libvirt nwfilter daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-admin-sock (Stream)

○ virtnwfilterd-ro.socket - libvirt nwfilter daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock-ro (Stream)

○ virtnwfilterd.socket - libvirt nwfilter daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock (Stream)

● virtproxyd-admin.socket - libvirt proxy daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-admin.socket.d
             └─override.conf
     Active: active (listening) since Mon 2026-02-02 09:54:30 UTC; 30min ago
      Until: Mon 2026-02-02 09:54:30 UTC; 30min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtproxyd-admin.socket

Feb 02 09:54:30 compute-0 systemd[1]: Starting libvirt proxy daemon admin socket...
Feb 02 09:54:30 compute-0 systemd[1]: Listening on libvirt proxy daemon admin socket.

● virtproxyd-ro.socket - libvirt proxy daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-ro.socket.d
             └─override.conf
     Active: active (listening) since Mon 2026-02-02 09:54:30 UTC; 30min ago
      Until: Mon 2026-02-02 09:54:30 UTC; 30min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 4ms
     CGroup: /system.slice/virtproxyd-ro.socket

Feb 02 09:54:30 compute-0 systemd[1]: Starting libvirt proxy daemon read-only socket...
Feb 02 09:54:30 compute-0 systemd[1]: Listening on libvirt proxy daemon read-only socket.

● virtproxyd-tls.socket - libvirt proxy daemon TLS IP socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-tls.socket; enabled; preset: disabled)
     Active: active (listening) since Mon 2026-02-02 09:53:20 UTC; 31min ago
      Until: Mon 2026-02-02 09:53:20 UTC; 31min ago
   Triggers: ● virtproxyd.service
     Listen: [::]:16514 (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 8.0K (peak: 256.0K)
        CPU: 1ms
     CGroup: /system.slice/virtproxyd-tls.socket

Feb 02 09:53:20 compute-0 systemd[1]: Listening on libvirt proxy daemon TLS IP socket.

● virtproxyd.socket - libvirt proxy daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd.socket.d
             └─override.conf
     Active: active (listening) since Mon 2026-02-02 09:53:20 UTC; 31min ago
      Until: Mon 2026-02-02 09:53:20 UTC; 31min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock (Stream)
     CGroup: /system.slice/virtproxyd.socket

Feb 02 09:53:20 compute-0 systemd[1]: Listening on libvirt proxy daemon socket.

● virtqemud-admin.socket - libvirt QEMU daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-admin.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 09:54:31 UTC; 30min ago
      Until: Mon 2026-02-02 09:54:31 UTC; 30min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 560.0K)
        CPU: 4ms
     CGroup: /system.slice/virtqemud-admin.socket

Feb 02 09:54:31 compute-0 systemd[1]: Starting libvirt QEMU daemon admin socket...
Feb 02 09:54:31 compute-0 systemd[1]: Listening on libvirt QEMU daemon admin socket.

● virtqemud-ro.socket - libvirt QEMU daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-ro.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 09:54:31 UTC; 30min ago
      Until: Mon 2026-02-02 09:54:31 UTC; 30min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtqemud-ro.socket

Feb 02 09:54:31 compute-0 systemd[1]: Starting libvirt QEMU daemon read-only socket...
Feb 02 09:54:31 compute-0 systemd[1]: Listening on libvirt QEMU daemon read-only socket.

● virtqemud.socket - libvirt QEMU daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-02 09:54:31 UTC; 30min ago
      Until: Mon 2026-02-02 09:54:31 UTC; 30min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 4ms
     CGroup: /system.slice/virtqemud.socket

Feb 02 09:54:31 compute-0 systemd[1]: Starting libvirt QEMU daemon socket...
Feb 02 09:54:31 compute-0 systemd[1]: Listening on libvirt QEMU daemon socket.

● virtsecretd-admin.socket - libvirt secret daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-admin.socket.d
             └─override.conf
     Active: active (listening) since Mon 2026-02-02 09:54:32 UTC; 30min ago
      Until: Mon 2026-02-02 09:54:32 UTC; 30min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 596.0K)
        CPU: 2ms
     CGroup: /system.slice/virtsecretd-admin.socket

Feb 02 09:54:32 compute-0 systemd[1]: Starting libvirt secret daemon admin socket...
Feb 02 09:54:32 compute-0 systemd[1]: Listening on libvirt secret daemon admin socket.

● virtsecretd-ro.socket - libvirt secret daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-ro.socket.d
             └─override.conf
     Active: active (listening) since Mon 2026-02-02 09:54:32 UTC; 30min ago
      Until: Mon 2026-02-02 09:54:32 UTC; 30min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtsecretd-ro.socket

Feb 02 09:54:32 compute-0 systemd[1]: Starting libvirt secret daemon read-only socket...
Feb 02 09:54:32 compute-0 systemd[1]: Listening on libvirt secret daemon read-only socket.

● virtsecretd.socket - libvirt secret daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd.socket.d
             └─override.conf
     Active: active (listening) since Mon 2026-02-02 09:54:32 UTC; 30min ago
      Until: Mon 2026-02-02 09:54:32 UTC; 30min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 740.0K)
        CPU: 2ms
     CGroup: /system.slice/virtsecretd.socket

Feb 02 09:54:32 compute-0 systemd[1]: Starting libvirt secret daemon socket...
Feb 02 09:54:32 compute-0 systemd[1]: Listening on libvirt secret daemon socket.

○ virtstoraged-admin.socket - libvirt storage daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-admin-sock (Stream)

○ virtstoraged-ro.socket - libvirt storage daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock-ro (Stream)

○ virtstoraged.socket - libvirt storage daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock (Stream)

● swap.swap - /swap
     Loaded: loaded (/etc/fstab; generated)
     Active: active since Mon 2026-02-02 09:32:57 UTC; 52min ago
      Until: Mon 2026-02-02 09:32:57 UTC; 52min ago
       What: /swap
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● basic.target - Basic System
     Loaded: loaded (/usr/lib/systemd/system/basic.target; static)
     Active: active since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd.special(7)

Feb 02 09:00:45 localhost systemd[1]: Reached target Basic System.

○ blockdev@dev-disk-by\x2duuid-822f14ea\x2d6e7e\x2d41df\x2db0d8\x2dfbe282d9ded8.target - Block Device Preparation for /dev/disk/by-uuid/822f14ea-6e7e-41df-b0d8-fbe282d9ded8
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ blockdev@dev-vda1.target - Block Device Preparation for /dev/vda1
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1.target - Ceph cluster d241d473-9fcb-5f74-b163-f1ca4454e7f1
     Loaded: loaded (/etc/systemd/system/ceph-d241d473-9fcb-5f74-b163-f1ca4454e7f1.target; enabled; preset: disabled)
     Active: active since Mon 2026-02-02 09:37:42 UTC; 47min ago
      Until: Mon 2026-02-02 09:37:42 UTC; 47min ago

Feb 02 09:37:42 compute-0 systemd[1]: Reached target Ceph cluster d241d473-9fcb-5f74-b163-f1ca4454e7f1.

● ceph.target - All Ceph clusters and services
     Loaded: loaded (/etc/systemd/system/ceph.target; enabled; preset: disabled)
     Active: active since Mon 2026-02-02 09:37:42 UTC; 47min ago
      Until: Mon 2026-02-02 09:37:42 UTC; 47min ago

Feb 02 09:37:42 compute-0 systemd[1]: Reached target All Ceph clusters and services.

● cloud-config.target - Cloud-config availability
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.target; static)
     Active: active since Mon 2026-02-02 09:00:50 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:50 UTC; 1h 24min ago

Feb 02 09:00:50 np0005604790.novalocal systemd[1]: Reached target Cloud-config availability.

● cloud-init.target - Cloud-init target
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.target; enabled-runtime; preset: disabled)
     Active: active since Mon 2026-02-02 09:00:51 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:51 UTC; 1h 24min ago

Feb 02 09:00:51 np0005604790.novalocal systemd[1]: Reached target Cloud-init target.

○ cryptsetup-pre.target - Local Encrypted Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● cryptsetup.target - Local Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup.target; static)
     Active: active since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd.special(7)

● edpm_libvirt.target
     Loaded: loaded (/etc/systemd/system/edpm_libvirt.target; static)
     Active: active since Mon 2026-02-02 09:55:08 UTC; 29min ago
      Until: Mon 2026-02-02 09:55:08 UTC; 29min ago

Feb 02 09:55:08 compute-0 systemd[1]: Reached target edpm_libvirt.target.

○ emergency.target - Emergency Mode
     Loaded: loaded (/usr/lib/systemd/system/emergency.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ first-boot-complete.target - First Boot Complete
     Loaded: loaded (/usr/lib/systemd/system/first-boot-complete.target; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd.special(7)

Feb 02 09:00:45 localhost systemd[1]: First Boot Complete was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ getty-pre.target - Preparation for Logins
     Loaded: loaded (/usr/lib/systemd/system/getty-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

● getty.target - Login Prompts
     Loaded: loaded (/usr/lib/systemd/system/getty.target; static)
     Active: active since Mon 2026-02-02 09:00:50 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:50 UTC; 1h 24min ago
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

Feb 02 09:00:50 np0005604790.novalocal systemd[1]: Reached target Login Prompts.

○ graphical.target - Graphical Interface
     Loaded: loaded (/usr/lib/systemd/system/graphical.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ initrd-fs.target - Initrd File Systems
     Loaded: loaded (/usr/lib/systemd/system/initrd-fs.target; static)
     Active: inactive (dead) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd.special(7)

Feb 02 09:00:43 localhost systemd[1]: Reached target Initrd File Systems.

○ initrd-root-device.target - Initrd Root Device
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-device.target; static)
     Active: inactive (dead) since Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
       Docs: man:systemd.special(7)

Feb 02 09:00:42 localhost systemd[1]: Reached target Initrd Root Device.
Feb 02 09:00:44 localhost systemd[1]: Stopped target Initrd Root Device.

○ initrd-root-fs.target - Initrd Root File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-fs.target; static)
     Active: inactive (dead) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd.special(7)

Feb 02 09:00:43 localhost systemd[1]: Reached target Initrd Root File System.

○ initrd-switch-root.target - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.target; static)
     Active: inactive (dead) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago

Feb 02 09:00:44 localhost systemd[1]: Reached target Switch Root.

○ initrd-usr-fs.target - Initrd /usr File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-usr-fs.target; static)
     Active: inactive (dead) since Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
       Docs: man:systemd.special(7)

Feb 02 09:00:44 localhost systemd[1]: Stopped target Initrd /usr File System.
Notice: journal has been rotated since unit was started, output may be incomplete.

○ initrd.target - Initrd Default Target
     Loaded: loaded (/usr/lib/systemd/system/initrd.target; static)
     Active: inactive (dead) since Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
       Docs: man:systemd.special(7)

Feb 02 09:00:43 localhost systemd[1]: Reached target Initrd Default Target.
Feb 02 09:00:44 localhost systemd[1]: Stopped target Initrd Default Target.

● integritysetup.target - Local Integrity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/integritysetup.target; static)
     Active: active since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd.special(7)

● local-fs-pre.target - Preparation for Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs-pre.target; static)
     Active: active since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd.special(7)

Feb 02 09:00:45 localhost systemd[1]: Reached target Preparation for Local File Systems.

● local-fs.target - Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs.target; static)
     Active: active since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd.special(7)

Feb 02 09:00:45 localhost systemd[1]: Reached target Local File Systems.

● multi-user.target - Multi-User System
     Loaded: loaded (/usr/lib/systemd/system/multi-user.target; indirect; preset: disabled)
     Active: active since Mon 2026-02-02 09:00:50 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:50 UTC; 1h 24min ago
       Docs: man:systemd.special(7)

Feb 02 09:00:50 np0005604790.novalocal systemd[1]: Reached target Multi-User System.

● network-online.target - Network is Online
     Loaded: loaded (/usr/lib/systemd/system/network-online.target; static)
     Active: active since Mon 2026-02-02 09:00:50 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:50 UTC; 1h 24min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Feb 02 09:00:50 np0005604790.novalocal systemd[1]: Reached target Network is Online.

● network-pre.target - Preparation for Network
     Loaded: loaded (/usr/lib/systemd/system/network-pre.target; static)
     Active: active since Mon 2026-02-02 09:00:47 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:47 UTC; 1h 24min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Feb 02 09:00:47 np0005604790.novalocal systemd[1]: Reached target Preparation for Network.

● network.target - Network
     Loaded: loaded (/usr/lib/systemd/system/network.target; static)
     Active: active since Mon 2026-02-02 09:00:47 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:47 UTC; 1h 24min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Feb 02 09:00:47 np0005604790.novalocal systemd[1]: Reached target Network.

● nfs-client.target - NFS client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-client.target; enabled; preset: disabled)
     Active: active since Mon 2026-02-02 09:00:47 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:47 UTC; 1h 24min ago

Feb 02 09:00:47 np0005604790.novalocal systemd[1]: Reached target NFS client services.

○ nss-lookup.target - Host and Network Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-lookup.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● nss-user-lookup.target - User and Group Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-user-lookup.target; static)
     Active: active since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd.special(7)

Feb 02 09:00:45 localhost systemd[1]: Reached target User and Group Name Lookups.

● paths.target - Path Units
     Loaded: loaded (/usr/lib/systemd/system/paths.target; static)
     Active: active since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd.special(7)

○ remote-cryptsetup.target - Remote Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-cryptsetup.target; disabled; preset: enabled)
     Active: inactive (dead) since Mon 2026-02-02 09:00:44 UTC; 1h 24min ago
       Docs: man:systemd.special(7)

Feb 02 09:00:43 localhost systemd[1]: Reached target Remote Encrypted Volumes.
Feb 02 09:00:44 localhost systemd[1]: Stopped target Remote Encrypted Volumes.

● remote-fs-pre.target - Preparation for Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs-pre.target; static)
     Active: active since Mon 2026-02-02 09:00:47 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:47 UTC; 1h 24min ago
       Docs: man:systemd.special(7)

Feb 02 09:00:47 np0005604790.novalocal systemd[1]: Reached target Preparation for Remote File Systems.

● remote-fs.target - Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs.target; enabled; preset: enabled)
     Active: active since Mon 2026-02-02 09:00:47 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:47 UTC; 1h 24min ago
       Docs: man:systemd.special(7)

Feb 02 09:00:47 np0005604790.novalocal systemd[1]: Reached target Remote File Systems.

○ remote-veritysetup.target - Remote Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-veritysetup.target; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ rescue.target - Rescue Mode
     Loaded: loaded (/usr/lib/systemd/system/rescue.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● rpc_pipefs.target
     Loaded: loaded (/usr/lib/systemd/system/rpc_pipefs.target; static)
     Active: active since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago

● rpcbind.target - RPC Port Mapper
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.target; static)
     Active: active since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd.special(7)

○ shutdown.target - System Shutdown
     Loaded: loaded (/usr/lib/systemd/system/shutdown.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

Unit syslog.target could not be found.

● slices.target - Slice Units
     Loaded: loaded (/usr/lib/systemd/system/slices.target; static)
     Active: active since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd.special(7)

● sockets.target - Socket Units
     Loaded: loaded (/usr/lib/systemd/system/sockets.target; static)
     Active: active since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd.special(7)

Feb 02 09:00:45 localhost systemd[1]: Reached target Socket Units.

● sshd-keygen.target
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen.target; static)
     Active: active since Mon 2026-02-02 09:53:02 UTC; 32min ago
      Until: Mon 2026-02-02 09:53:02 UTC; 32min ago

Feb 02 09:53:02 compute-0 systemd[1]: Reached target sshd-keygen.target.

● swap.target - Swaps
     Loaded: loaded (/usr/lib/systemd/system/swap.target; static)
     Active: active since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd.special(7)

● sysinit.target - System Initialization
     Loaded: loaded (/usr/lib/systemd/system/sysinit.target; static)
     Active: active since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd.special(7)

Feb 02 09:00:45 localhost systemd[1]: Reached target System Initialization.

● time-set.target - System Time Set
     Loaded: loaded (/usr/lib/systemd/system/time-set.target; static)
     Active: active since Mon 2026-02-02 09:37:43 UTC; 47min ago
      Until: Mon 2026-02-02 09:37:43 UTC; 47min ago
       Docs: man:systemd.special(7)

Feb 02 09:37:43 compute-0 systemd[1]: Reached target System Time Set.

● time-sync.target - System Time Synchronized
     Loaded: loaded (/usr/lib/systemd/system/time-sync.target; static)
     Active: active since Mon 2026-02-02 09:37:43 UTC; 47min ago
      Until: Mon 2026-02-02 09:37:43 UTC; 47min ago
       Docs: man:systemd.special(7)

Feb 02 09:37:43 compute-0 systemd[1]: Reached target System Time Synchronized.

● timers.target - Timer Units
     Loaded: loaded (/usr/lib/systemd/system/timers.target; static)
     Active: active since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd.special(7)

Feb 02 09:00:45 localhost systemd[1]: Reached target Timer Units.

○ umount.target - Unmount All Filesystems
     Loaded: loaded (/usr/lib/systemd/system/umount.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ veritysetup-pre.target - Local Verity Protected Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/veritysetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● veritysetup.target - Local Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/veritysetup.target; static)
     Active: active since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
       Docs: man:systemd.special(7)

○ virt-guest-shutdown.target - Libvirt guests shutdown
     Loaded: loaded (/etc/systemd/system/virt-guest-shutdown.target; static)
     Active: inactive (dead)
       Docs: https://libvirt.org

● 29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8-70356c1a3b924523.timer - /usr/bin/podman healthcheck run 29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8
     Loaded: loaded (/run/systemd/transient/29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8-70356c1a3b924523.timer; transient)
  Transient: yes
     Active: active (waiting) since Mon 2026-02-02 09:50:43 UTC; 34min ago
      Until: Mon 2026-02-02 09:50:43 UTC; 34min ago
    Trigger: Mon 2026-02-02 10:25:31 UTC; 23s left
   Triggers: ● 29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8-70356c1a3b924523.service

Feb 02 09:50:43 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run 29bea7eb4976451e3dfdb1e9a3aba30b10e64cbce131bf8d09548b4a224f8ee8.

● dnf-makecache.timer - dnf makecache --timer
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.timer; enabled; preset: enabled)
     Active: active (waiting) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
    Trigger: Mon 2026-02-02 11:07:51 UTC; 42min left
   Triggers: ● dnf-makecache.service

Feb 02 09:00:45 localhost systemd[1]: Started dnf makecache --timer.

● e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36-72a4a8d8f632219c.timer - /usr/bin/podman healthcheck run e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36
     Loaded: loaded (/run/systemd/transient/e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36-72a4a8d8f632219c.timer; transient)
  Transient: yes
     Active: active (waiting) since Mon 2026-02-02 09:49:28 UTC; 35min ago
      Until: Mon 2026-02-02 09:49:28 UTC; 35min ago
    Trigger: Mon 2026-02-02 10:25:23 UTC; 15s left
   Triggers: ● e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36-72a4a8d8f632219c.service

Feb 02 09:49:28 compute-0 systemd[1]: Started /usr/bin/podman healthcheck run e39122d9482da8df802204ab6c35fb7c982874580968140e6f06bdfc8eefae36.

● logrotate.timer - Daily rotation of log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.timer; enabled; preset: enabled)
     Active: active (waiting) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
    Trigger: Tue 2026-02-03 00:00:00 UTC; 13h left
   Triggers: ● logrotate.service
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

Feb 02 09:00:45 localhost systemd[1]: Started Daily rotation of log files.

○ raid-check.timer - Weekly RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.timer; enabled; preset: enabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● raid-check.service

○ sysstat-collect.timer - Run system activity accounting tool every 10 minutes
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-collect.service

○ sysstat-summary.timer - Generate summary of yesterday's process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-summary.service

● systemd-tmpfiles-clean.timer - Daily Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.timer; static)
     Active: active (waiting) since Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
      Until: Mon 2026-02-02 09:00:45 UTC; 1h 24min ago
    Trigger: Tue 2026-02-03 09:15:51 UTC; 22h left
   Triggers: ● systemd-tmpfiles-clean.service
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)

Feb 02 09:00:45 localhost systemd[1]: Started Daily Cleanup of Temporary Directories.

● unbound-anchor.timer - daily update of the root trust anchor for DNSSEC
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.timer; enabled; preset: enabled)
     Active: active (waiting) since Mon 2026-02-02 09:33:27 UTC; 51min ago
      Until: Mon 2026-02-02 09:33:27 UTC; 51min ago
    Trigger: Tue 2026-02-03 00:00:00 UTC; 13h left
   Triggers: ● unbound-anchor.service
       Docs: man:unbound-anchor(8)

Feb 02 09:33:27 compute-0 systemd[1]: Started daily update of the root trust anchor for DNSSEC.
