● np0005626466.localdomain
    State: running
    Units: 534 loaded (incl. loaded aliases)
     Jobs: 0 queued
   Failed: 0 units
    Since: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
  systemd: 252-14.el9_2.8
   CGroup: /
           ├─init.scope
           │ └─1 /usr/lib/systemd/systemd --system --deserialize 25
           ├─machine.slice
           │ ├─libpod-122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4.scope
           │ │ └─container
           │ │   ├─156068 dumb-init --single-child -- kolla_start
           │ │   └─156071 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock
           │ ├─libpod-2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79.scope
           │ │ └─container
           │ │   ├─280694 dumb-init --single-child -- kolla_start
           │ │   ├─280697 /usr/bin/python3 /usr/bin/nova-compute
           │ │   ├─307841 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmpqn5dfyp_/privsep.sock
           │ │   └─324666 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmpa7k05g5b/privsep.sock
           │ ├─libpod-3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb.scope
           │ │ └─container
           │ │   ├─256427 dumb-init --single-child -- kolla_start
           │ │   └─256429 "neutron-sriov-nic-agent (/usr/bin/python3 /usr/bin/neutron-sriov-nic-agent)"
           │ ├─libpod-6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e.scope
           │ │ └─container
           │ │   └─243612 /app/openstack-network-exporter
           │ ├─libpod-8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c.scope
           │ │ └─container
           │ │   ├─264101 dumb-init --single-child -- /usr/sbin/dnsmasq -k --no-hosts --no-resolv --pid-file=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/pid --dhcp-hostsfile=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/host --addn-hosts=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/addn_hosts --dhcp-optsfile=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/opts --dhcp-leasefile=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/leases --dhcp-match=set:ipxe,175 --dhcp-userclass=set:ipxe6,iPXE --local-service --bind-dynamic --dhcp-range=set:subnet-21d77760-119f-4466-bc85-a0e9167487a9,192.168.122.0,static,255.255.255.0,86400s --dhcp-option-force=option:mtu,1350 --dhcp-lease-max=256 --conf-file=/dev/null --domain=openstacklocal
           │ │   └─264111 /usr/sbin/dnsmasq -k --no-hosts --no-resolv --pid-file=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/pid --dhcp-hostsfile=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/host --addn-hosts=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/addn_hosts --dhcp-optsfile=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/opts --dhcp-leasefile=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/leases --dhcp-match=set:ipxe,175 --dhcp-userclass=set:ipxe6,iPXE --local-service --bind-dynamic --dhcp-range=set:subnet-21d77760-119f-4466-bc85-a0e9167487a9,192.168.122.0,static,255.255.255.0,86400s --dhcp-option-force=option:mtu,1350 --dhcp-lease-max=256 --conf-file=/dev/null --domain=openstacklocal
           │ ├─libpod-8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e.scope
           │ │ └─container
           │ │   ├─161941 dumb-init --single-child -- kolla_start
           │ │   ├─161944 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │ │   ├─162130 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │ │   ├─162175 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp84iwktpn/privsep.sock
           │ │   ├─264002 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpbtevuisl/privsep.sock
           │ │   └─308309 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpkfen9wub/privsep.sock
           │ ├─libpod-a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41.scope
           │ │ └─container
           │ │   ├─264098 dumb-init --single-child -- /usr/sbin/dnsmasq -k --no-hosts --no-resolv --pid-file=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/pid --dhcp-hostsfile=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/host --addn-hosts=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/addn_hosts --dhcp-optsfile=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/opts --dhcp-leasefile=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/leases --dhcp-match=set:ipxe,175 --dhcp-userclass=set:ipxe6,iPXE --local-service --bind-dynamic --dhcp-range=set:subnet-33e5fe9f-f246-45b2-86ae-3c10c0318cd7,192.168.0.0,static,255.255.255.0,86400s --dhcp-option-force=option:mtu,1292 --dhcp-lease-max=256 --conf-file=/dev/null --domain=openstacklocal
           │ │   └─264104 /usr/sbin/dnsmasq -k --no-hosts --no-resolv --pid-file=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/pid --dhcp-hostsfile=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/host --addn-hosts=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/addn_hosts --dhcp-optsfile=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/opts --dhcp-leasefile=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/leases --dhcp-match=set:ipxe,175 --dhcp-userclass=set:ipxe6,iPXE --local-service --bind-dynamic --dhcp-range=set:subnet-33e5fe9f-f246-45b2-86ae-3c10c0318cd7,192.168.0.0,static,255.255.255.0,86400s --dhcp-option-force=option:mtu,1292 --dhcp-lease-max=256 --conf-file=/dev/null --domain=openstacklocal
           │ ├─libpod-bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d.scope
           │ │ └─container
           │ │   ├─263821 dumb-init --single-child -- kolla_start
           │ │   ├─263823 "neutron-dhcp-agent (/usr/bin/python3 /usr/bin/neutron-dhcp-agent)"
           │ │   ├─263857 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpg0yua8_o/privsep.sock
           │ │   ├─263887 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp7urwfrmj/privsep.sock
           │ │   ├─263903 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpopxbn0rr/privsep.sock
           │ │   └─315526 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.dhcp_release_cmd --privsep_sock_path /tmp/tmp02asih0a/privsep.sock
           │ ├─libpod-c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8.scope
           │ │ └─container
           │ │   └─241166 /bin/podman_exporter
           │ ├─libpod-cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52.scope
           │ │ └─container
           │ │   └─238755 /bin/node_exporter --web.disable-exporter-metrics --collector.systemd "--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service" --no-collector.dmi --no-collector.entropy --no-collector.thermal_zone --no-collector.time --no-collector.timex --no-collector.uname --no-collector.stat --no-collector.hwmon --no-collector.os --no-collector.selinux --no-collector.textfile --no-collector.powersupplyclass --no-collector.pressure --no-collector.rapl --path.rootfs=/rootfs
           │ ├─libpod-conmon-8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c.scope
           │ │ └─264099 /usr/bin/conmon --api-version 1 -c 8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c -u 8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c/userdata -p /run/containers/storage/overlay-containers/8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c/userdata/pidfile -n neutron-dnsmasq-qdhcp-91fdc6a7-b901-4255-83f7-4b37365658a3 --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c/userdata/oci-log --conmon-pidfile /run/containers/storage/overlay-containers/8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c/userdata/conmon.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c
           │ ├─libpod-conmon-a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41.scope
           │ │ └─264095 /usr/bin/conmon --api-version 1 -c a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41 -u a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41/userdata -p /run/containers/storage/overlay-containers/a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41/userdata/pidfile -n neutron-dnsmasq-qdhcp-9da5b53d-3184-450f-9a5b-bdba1a6c9f6d --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41/userdata/oci-log --conmon-pidfile /run/containers/storage/overlay-containers/a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41/userdata/conmon.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41
           │ └─libpod-fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9.scope
           │   └─container
           │     ├─236491 dumb-init --single-child -- kolla_start
           │     ├─236494 "ceilometer-polling: master process [/usr/bin/ceilometer-polling --polling-namespaces compute --logfile /dev/stdout]"
           │     └─236558 "ceilometer-polling: AgentManager worker(0)"
           ├─system.slice
           │ ├─NetworkManager.service
           │ │ └─5981 /usr/sbin/NetworkManager --no-daemon
           │ ├─auditd.service
           │ │ ├─726 /sbin/auditd
           │ │ └─728 /usr/sbin/sedispatch
           │ ├─chronyd.service
           │ │ └─139217 /usr/sbin/chronyd -F 2
           │ ├─crond.service
           │ │ └─1139 /usr/sbin/crond -n
           │ ├─dbus-broker.service
           │ │ ├─751 /usr/bin/dbus-broker-launch --scope system --audit
           │ │ └─755 dbus-broker --log 4 --controller 9 --machine-id c0212a8b024a111cfc61293864f36c87 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
           │ ├─edpm_ceilometer_agent_compute.service
           │ │ └─236489 /usr/bin/conmon --api-version 1 -c fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9 -u fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9/userdata -p /run/containers/storage/overlay-containers/fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9/userdata/pidfile -n ceilometer_agent_compute --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9/userdata/oci-log --conmon-pidfile /run/ceilometer_agent_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9
           │ ├─edpm_neutron_dhcp_agent.service
           │ │ └─263819 /usr/bin/conmon --api-version 1 -c bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d -u bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d/userdata -p /run/containers/storage/overlay-containers/bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d/userdata/pidfile -n neutron_dhcp_agent --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d/userdata/oci-log --conmon-pidfile /run/neutron_dhcp_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d
           │ ├─edpm_neutron_sriov_agent.service
           │ │ └─256425 /usr/bin/conmon --api-version 1 -c 3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb -u 3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb/userdata -p /run/containers/storage/overlay-containers/3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb/userdata/pidfile -n neutron_sriov_agent --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb/userdata/oci-log --conmon-pidfile /run/neutron_sriov_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb
           │ ├─edpm_node_exporter.service
           │ │ └─238753 /usr/bin/conmon --api-version 1 -c cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52 -u cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52/userdata -p /run/containers/storage/overlay-containers/cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52/userdata/pidfile -n node_exporter --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52/userdata/oci-log --conmon-pidfile /run/node_exporter.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52
           │ ├─edpm_nova_compute.service
           │ │ └─280690 /usr/bin/conmon --api-version 1 -c 2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79 -u 2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79/userdata -p /run/containers/storage/overlay-containers/2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79
           │ ├─edpm_openstack_network_exporter.service
           │ │ └─243610 /usr/bin/conmon --api-version 1 -c 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e -u 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e/userdata -p /run/containers/storage/overlay-containers/6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e/userdata/pidfile -n openstack_network_exporter --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e/userdata/oci-log --conmon-pidfile /run/openstack_network_exporter.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e
           │ ├─edpm_ovn_controller.service
           │ │ └─156066 /usr/bin/conmon --api-version 1 -c 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4 -u 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4/userdata -p /run/containers/storage/overlay-containers/122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4
           │ ├─edpm_ovn_metadata_agent.service
           │ │ └─161939 /usr/bin/conmon --api-version 1 -c 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e -u 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e/userdata -p /run/containers/storage/overlay-containers/8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e
           │ ├─edpm_podman_exporter.service
           │ │ └─241164 /usr/bin/conmon --api-version 1 -c c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8 -u c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8/userdata -p /run/containers/storage/overlay-containers/c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8/userdata/pidfile -n podman_exporter --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8/userdata/oci-log --conmon-pidfile /run/podman_exporter.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8
           │ ├─gssproxy.service
           │ │ └─802 /usr/sbin/gssproxy -D
           │ ├─irqbalance.service
           │ │ └─758 /usr/sbin/irqbalance --foreground
           │ ├─iscsid.service
           │ │ └─212770 /usr/sbin/iscsid -f
           │ ├─multipathd.service
           │ │ └─218083 /sbin/multipathd -d -s
           │ ├─ovs-vswitchd.service
           │ │ └─22519 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
           │ ├─ovsdb-server.service
           │ │ └─22437 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
           │ ├─podman.service
           │ │ └─241175 /usr/bin/podman --log-level=info system service
           │ ├─polkit.service
           │ │ └─1036 /usr/lib/polkit-1/polkitd --no-debug
           │ ├─rhsm.service
           │ │ └─6643 /usr/bin/python3 /usr/libexec/rhsm-service
           │ ├─rhsmcertd.service
           │ │ └─795 /usr/bin/rhsmcertd
           │ ├─rpcbind.service
           │ │ └─724 /usr/bin/rpcbind -w -f
           │ ├─rsyslog.service
           │ │ └─759 /usr/sbin/rsyslogd -n
           │ ├─snmpd.service
           │ │ └─67626 /usr/sbin/snmpd -LS0-5d -f
           │ ├─sshd.service
           │ │ └─186135 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
           │ ├─system-ceph\x2df1fea371\x2dcb69\x2d578d\x2da3d0\x2db5c472a84b46.slice
           │ │ ├─ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@crash.np0005626466.service
           │ │ │ ├─libpod-payload-975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e
           │ │ │ │ ├─29138 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.np0005626466
           │ │ │ │ └─29140 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.np0005626466
           │ │ │ └─runtime
           │ │ │   └─29136 /usr/bin/conmon --api-version 1 -c 975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e -u 975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e/userdata -p /run/containers/storage/overlay-containers/975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-crash-np0005626466 --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@crash.np0005626466.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e
           │ │ ├─ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mds.mds.np0005626466.vaywlp.service
           │ │ │ ├─libpod-payload-13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf
           │ │ │ │ ├─285095 /run/podman-init -- /usr/bin/ceph-mds -n mds.mds.np0005626466.vaywlp -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─285097 /usr/bin/ceph-mds -n mds.mds.np0005626466.vaywlp -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─285093 /usr/bin/conmon --api-version 1 -c 13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf -u 13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf/userdata -p /run/containers/storage/overlay-containers/13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-mds-mds-np0005626466-vaywlp --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mds.mds.np0005626466.vaywlp.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf
           │ │ ├─ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mgr.np0005626466.nisqfq.service
           │ │ │ ├─libpod-payload-9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf
           │ │ │ │ ├─286340 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.np0005626466.nisqfq -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─286342 /usr/bin/ceph-mgr -n mgr.np0005626466.nisqfq -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─286338 /usr/bin/conmon --api-version 1 -c 9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf -u 9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf/userdata -p /run/containers/storage/overlay-containers/9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-mgr-np0005626466-nisqfq --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mgr.np0005626466.nisqfq.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf
           │ │ ├─ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mon.np0005626466.service
           │ │ │ ├─libpod-payload-2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a
           │ │ │ │ ├─300839 /run/podman-init -- /usr/bin/ceph-mon -n mon.np0005626466 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ │ └─300841 /usr/bin/ceph-mon -n mon.np0005626466 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─300837 /usr/bin/conmon --api-version 1 -c 2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a -u 2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a/userdata -p /run/containers/storage/overlay-containers/2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-mon-np0005626466 --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mon.np0005626466.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a
           │ │ ├─ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@osd.1.service
           │ │ │ ├─libpod-payload-f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b
           │ │ │ │ ├─31871 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─31873 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─31869 /usr/bin/conmon --api-version 1 -c f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b -u f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b/userdata -p /run/containers/storage/overlay-containers/f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-osd-1 --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b
           │ │ └─ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@osd.4.service
           │ │   ├─libpod-payload-149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e
           │ │   │ ├─32811 /run/podman-init -- /usr/bin/ceph-osd -n osd.4 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │   │ └─32813 /usr/bin/ceph-osd -n osd.4 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │   └─runtime
           │ │     └─32809 /usr/bin/conmon --api-version 1 -c 149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e -u 149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e/userdata -p /run/containers/storage/overlay-containers/149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-osd-4 --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@osd.4.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e
           │ ├─system-getty.slice
           │ │ └─getty@tty1.service
           │ │   └─1140 /sbin/agetty -o "-p -- \\u" --noclear - linux
           │ ├─system-serial\x2dgetty.slice
           │ │ └─serial-getty@ttyS0.service
           │ │   └─1142 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
           │ ├─systemd-hostnamed.service
           │ │ └─334843 /usr/lib/systemd/systemd-hostnamed
           │ ├─systemd-journald.service
Unit boot.automount could not be found.
           │ │ └─47881 /usr/lib/systemd/systemd-journald
           │ ├─systemd-logind.service
           │ │ └─760 /usr/lib/systemd/systemd-logind
           │ ├─systemd-machined.service
           │ │ └─205757 /usr/lib/systemd/systemd-machined
           │ ├─systemd-udevd.service
           │ │ └─udev
           │ │   ├─ 47889 /usr/lib/systemd/systemd-udevd
           │ │   └─339919 /usr/lib/systemd/systemd-udevd
           │ ├─tuned.service
           │ │ └─125602 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
           │ ├─virtlogd.service
           │ │ └─205249 /usr/sbin/virtlogd
           │ ├─virtnodedevd.service
           │ │ └─229338 /usr/sbin/virtnodedevd --timeout 120
           │ ├─virtqemud.service
           │ │ └─229010 /usr/sbin/virtqemud --timeout 120
           │ └─virtsecretd.service
           │   └─323059 /usr/sbin/virtsecretd --timeout 120
           └─user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4336 /usr/bin/python3
             │ ├─session-84.scope
             │ │ ├─331435 "sshd: zuul [priv]"
             │ │ ├─331438 "sshd: zuul@notty"
             │ │ ├─331439 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─331456 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─338387 timeout 15s turbostat --debug sleep 10
             │ │ ├─338389 turbostat --debug sleep 10
             │ │ ├─338406 sleep 10
             │ │ ├─339922 timeout 300s tuned-adm recommend
             │ │ ├─339923 /usr/bin/python3 -Es /usr/sbin/tuned-adm recommend
             │ │ ├─339926 timeout 300s ceph time-sync-status --format json-pretty
             │ │ ├─339927 /usr/bin/python3 -s /usr/bin/ceph time-sync-status --format json-pretty
             │ │ ├─340111 timeout 300s systemctl status --all
             │ │ └─340118 systemctl status --all
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─14084 /usr/bin/dbus-broker-launch --scope user
             │   │   └─14097 dbus-broker --log 4 --controller 9 --machine-id c0212a8b024a111cfc61293864f36c87 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4178 /usr/lib/systemd/systemd --user
             │   │ └─4180 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-de66dbe9.scope
             │       └─13932 podman
             └─user-1002.slice
               ├─session-71.scope
               │ ├─303883 "sshd: ceph-admin [priv]"
               │ └─303886 "sshd: ceph-admin@notty"
               └─user@1002.service
                 └─init.scope
                   ├─26358 /usr/lib/systemd/systemd --user
                   └─26360 "(sd-pam)"

● efi.automount - EFI System Partition Automount
     Loaded: loaded (/run/systemd/generator.late/efi.automount; generated)
     Active: active (running) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
   Triggers: ● efi.mount
      Where: /efi
       Docs: man:systemd-gpt-auto-generator(8)

Feb 23 06:36:03 localhost systemd[1]: Set up automount EFI System Partition Automount.
Feb 23 06:36:04 localhost systemd[1]: efi.automount: Got automount request for /efi, triggered by 717 (bootctl)
Feb 23 10:10:27 np0005626466.localdomain systemd[1]: efi.automount: Got automount request for /efi, triggered by 331906 (lsinitrd)

● proc-sys-fs-binfmt_misc.automount - Arbitrary Executable File Formats File System Automount Point
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.automount; static)
     Active: active (running) since Mon 2026-02-23 06:36:02 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 34min ago
   Triggers: ● proc-sys-fs-binfmt_misc.mount
      Where: /proc/sys/fs/binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Feb 23 07:37:40 np0005626466.localdomain systemd[1]: proc-sys-fs-binfmt_misc.automount: Got automount request for /proc/sys/fs/binfmt_misc, triggered by 26782 (sysctl)

● dev-cdrom.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ceph_vg0-ceph_lv0.device - /dev/ceph_vg0/ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:34:16 UTC; 2h 36min ago
      Until: Mon 2026-02-23 07:34:16 UTC; 2h 36min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-ceph_vg1-ceph_lv1.device - /dev/ceph_vg1/ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:34:24 UTC; 2h 36min ago
      Until: Mon 2026-02-23 07:34:24 UTC; 2h 36min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2ddiskseq-1.device - /dev/disk/by-diskseq/1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2ddiskseq-3.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2ddiskseq-4.device - /dev/disk/by-diskseq/4
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:34:15 UTC; 2h 36min ago
      Until: Mon 2026-02-23 07:34:15 UTC; 2h 36min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2ddiskseq-5.device - /dev/disk/by-diskseq/5
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 09:26:01 UTC; 45min ago
      Until: Mon 2026-02-23 09:26:01 UTC; 45min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2ddiskseq-6.device - /dev/disk/by-diskseq/6
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:34:23 UTC; 2h 36min ago
      Until: Mon 2026-02-23 07:34:23 UTC; 2h 36min ago
     Device: /sys/devices/virtual/block/loop4

● dev-disk-by\x2ddiskseq-7.device - /dev/disk/by-diskseq/7
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 09:26:01 UTC; 45min ago
      Until: Mon 2026-02-23 09:26:01 UTC; 45min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-ata\x2dQEMU_DVD\x2dROM_QM00001.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg0\x2dceph_lv0.device - /dev/disk/by-id/dm-name-ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:34:16 UTC; 2h 36min ago
      Until: Mon 2026-02-23 07:34:16 UTC; 2h 36min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg1\x2dceph_lv1.device - /dev/disk/by-id/dm-name-ceph_vg1-ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:34:24 UTC; 2h 36min ago
      Until: Mon 2026-02-23 07:34:24 UTC; 2h 36min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2d51BBmRbYIgIQgJfpFDms7Tf4qjsYQ2wvle24cklYZmurby38lAqYLLsm9sc8T81v.device - /dev/disk/by-id/dm-uuid-LVM-51BBmRbYIgIQgJfpFDms7Tf4qjsYQ2wvle24cklYZmurby38lAqYLLsm9sc8T81v
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:34:24 UTC; 2h 36min ago
      Until: Mon 2026-02-23 07:34:24 UTC; 2h 36min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2dUGQ8PeNfDFIxzovyHE6WvW6DeUYlbp1mIEuZ3yCOoCrsfC7nF81svEzSECtbu8Zn.device - /dev/disk/by-id/dm-uuid-LVM-UGQ8PeNfDFIxzovyHE6WvW6DeUYlbp1mIEuZ3yCOoCrsfC7nF81svEzSECtbu8Zn
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:34:16 UTC; 2h 36min ago
      Until: Mon 2026-02-23 07:34:16 UTC; 2h 36min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dIeAeNB\x2dIciF\x2dgTHh\x2dHQlW\x2drP1R\x2d4nBn\x2deaVFBv.device - /dev/disk/by-id/lvm-pv-uuid-IeAeNB-IciF-gTHh-HQlW-rP1R-4nBn-eaVFBv
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:34:16 UTC; 2h 36min ago
      Until: Mon 2026-02-23 07:34:16 UTC; 2h 36min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dZAh3ZL\x2dVFGf\x2dSPvZ\x2dTgpe\x2do3OF\x2deWpX\x2dNDy8cd.device - /dev/disk/by-id/lvm-pv-uuid-ZAh3ZL-VFGf-SPvZ-Tgpe-o3OF-eWpX-NDy8cd
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:34:24 UTC; 2h 36min ago
      Until: Mon 2026-02-23 07:34:24 UTC; 2h 36min ago
     Device: /sys/devices/virtual/block/loop4

● dev-disk-by\x2dlabel-boot.device - /dev/disk/by-label/boot
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda3.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda3

● dev-disk-by\x2dlabel-config\x2d2.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dlabel-root.device - /dev/disk/by-label/root
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda4.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda4

● dev-disk-by\x2dpartuuid-6264d520\x2d3fb9\x2d423f\x2d8ab8\x2d7a0a8e3d3562.device - /dev/disk/by-partuuid/6264d520-3fb9-423f-8ab8-7a0a8e3d3562
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda4.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda4

● dev-disk-by\x2dpartuuid-68b2905b\x2ddf3e\x2d4fb3\x2d80fa\x2d49d1e773aa33.device - /dev/disk/by-partuuid/68b2905b-df3e-4fb3-80fa-49d1e773aa33
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda2.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda2

● dev-disk-by\x2dpartuuid-cb07c243\x2dbc44\x2d4717\x2d853e\x2d28852021225b.device - /dev/disk/by-partuuid/cb07c243-bc44-4717-853e-28852021225b
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda3.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda3

● dev-disk-by\x2dpartuuid-fac7f1fb\x2d3e8d\x2d4137\x2da512\x2d961de09a5549.device - /dev/disk/by-partuuid/fac7f1fb-3e8d-4137-a512-961de09a5549
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0.device - /dev/disk/by-path/pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0\x2dpart2.device - /dev/disk/by-path/pci-0000:00:04.0-part2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda2.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda2

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0\x2dpart3.device - /dev/disk/by-path/pci-0000:00:04.0-part3
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda3.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda3

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0\x2dpart4.device - /dev/disk/by-path/pci-0000:00:04.0-part4
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda4.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda4

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0.device - /dev/disk/by-path/virtio-pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/virtio-pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0\x2dpart2.device - /dev/disk/by-path/virtio-pci-0000:00:04.0-part2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda2.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda2

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0\x2dpart3.device - /dev/disk/by-path/virtio-pci-0000:00:04.0-part3
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda3.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda3

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0\x2dpart4.device - /dev/disk/by-path/virtio-pci-0000:00:04.0-part4
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda4.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda4

● dev-disk-by\x2duuid-2026\x2d02\x2d23\x2d06\x2d35\x2d51\x2d00.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2duuid-7B77\x2d95E7.device - /dev/disk/by-uuid/7B77-95E7
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda2.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda2

Feb 23 06:36:03 localhost systemd[1]: Condition check resulted in /dev/disk/by-uuid/7B77-95E7 being skipped.

● dev-disk-by\x2duuid-a3dd82de\x2dffc6\x2d4652\x2d88b9\x2d80e003b8f20a.device - /dev/disk/by-uuid/a3dd82de-ffc6-4652-88b9-80e003b8f20a
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda4.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda4

Feb 23 06:36:00 localhost systemd[1]: Found device /dev/disk/by-uuid/a3dd82de-ffc6-4652-88b9-80e003b8f20a.

● dev-disk-by\x2duuid-b141154b\x2d6a70\x2d437a\x2da97f\x2dd160c9ba37eb.device - /dev/disk/by-uuid/b141154b-6a70-437a-a97f-d160c9ba37eb
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda3.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda3

Feb 23 06:36:03 localhost systemd[1]: Condition check resulted in /dev/disk/by-uuid/b141154b-6a70-437a-a97f-d160c9ba37eb being skipped.

● dev-dm\x2d0.device - /dev/dm-0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:34:16 UTC; 2h 36min ago
      Until: Mon 2026-02-23 07:34:16 UTC; 2h 36min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-dm\x2d1.device - /dev/dm-1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:34:24 UTC; 2h 36min ago
      Until: Mon 2026-02-23 07:34:24 UTC; 2h 36min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-loop3.device - /dev/loop3
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:34:15 UTC; 2h 36min ago
      Until: Mon 2026-02-23 07:34:15 UTC; 2h 36min ago
     Device: /sys/devices/virtual/block/loop3

● dev-loop4.device - /dev/loop4
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:34:23 UTC; 2h 36min ago
      Until: Mon 2026-02-23 07:34:23 UTC; 2h 36min ago
     Device: /sys/devices/virtual/block/loop4

● dev-mapper-ceph_vg0\x2dceph_lv0.device - /dev/mapper/ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:34:16 UTC; 2h 36min ago
      Until: Mon 2026-02-23 07:34:16 UTC; 2h 36min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-mapper-ceph_vg1\x2dceph_lv1.device - /dev/mapper/ceph_vg1-ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:34:24 UTC; 2h 36min ago
      Until: Mon 2026-02-23 07:34:24 UTC; 2h 36min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-rfkill.device - /dev/rfkill
    Follows: unit currently follows state of sys-devices-virtual-misc-rfkill.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:05 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:05 UTC; 3h 34min ago
     Device: /sys/devices/virtual/misc/rfkill

● dev-sr0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ttyS0.device - /dev/ttyS0
    Follows: unit currently follows state of sys-devices-pnp0-00:00-tty-ttyS0.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

Feb 23 06:36:03 localhost systemd[1]: Condition check resulted in /dev/ttyS0 being skipped.

● dev-ttyS1.device - /dev/ttyS1
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● dev-ttyS2.device - /dev/ttyS2
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS2.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● dev-ttyS3.device - /dev/ttyS3
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS3.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● dev-vda.device - /dev/vda
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-vda1.device - /dev/vda1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-vda2.device - /dev/vda2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda2.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda2

● dev-vda3.device - /dev/vda3
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda3.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda3

● dev-vda4.device - /dev/vda4
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda4.device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda4

● sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device - QEMU_DVD-ROM config-2
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● sys-devices-pci0000:00-0000:00:03.0-virtio1-net-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda2.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda2
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda2

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda3.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda3
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda3

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda4.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda4
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda4

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● sys-devices-pci0000:00-0000:00:07.0-virtio5-net-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:42:59 UTC; 3h 28min ago
      Until: Mon 2026-02-23 06:42:59 UTC; 3h 28min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-devices-platform-serial8250-tty-ttyS1.device - /sys/devices/platform/serial8250/tty/ttyS1
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● sys-devices-platform-serial8250-tty-ttyS2.device - /sys/devices/platform/serial8250/tty/ttyS2
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● sys-devices-platform-serial8250-tty-ttyS3.device - /sys/devices/platform/serial8250/tty/ttyS3
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● sys-devices-pnp0-00:00-tty-ttyS0.device - /sys/devices/pnp0/00:00/tty/ttyS0
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

● sys-devices-virtual-block-dm\x2d0.device - /sys/devices/virtual/block/dm-0
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:34:16 UTC; 2h 36min ago
      Until: Mon 2026-02-23 07:34:16 UTC; 2h 36min ago
     Device: /sys/devices/virtual/block/dm-0

● sys-devices-virtual-block-dm\x2d1.device - /sys/devices/virtual/block/dm-1
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:34:24 UTC; 2h 36min ago
      Until: Mon 2026-02-23 07:34:24 UTC; 2h 36min ago
     Device: /sys/devices/virtual/block/dm-1

● sys-devices-virtual-block-loop3.device - /sys/devices/virtual/block/loop3
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:34:15 UTC; 2h 36min ago
      Until: Mon 2026-02-23 07:34:15 UTC; 2h 36min ago
     Device: /sys/devices/virtual/block/loop3

● sys-devices-virtual-block-loop4.device - /sys/devices/virtual/block/loop4
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:34:23 UTC; 2h 36min ago
      Until: Mon 2026-02-23 07:34:23 UTC; 2h 36min ago
     Device: /sys/devices/virtual/block/loop4

● sys-devices-virtual-misc-rfkill.device - /sys/devices/virtual/misc/rfkill
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:05 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:05 UTC; 3h 34min ago
     Device: /sys/devices/virtual/misc/rfkill

● sys-devices-virtual-net-br\x2dex.device - /sys/devices/virtual/net/br-ex
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:15:04 UTC; 2h 55min ago
      Until: Mon 2026-02-23 07:15:04 UTC; 2h 55min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-devices-virtual-net-br\x2dint.device - /sys/devices/virtual/net/br-int
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 08:09:43 UTC; 2h 1min ago
      Until: Mon 2026-02-23 08:09:43 UTC; 2h 1min ago
     Device: /sys/devices/virtual/net/br-int

● sys-devices-virtual-net-genev_sys_6081.device - /sys/devices/virtual/net/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 08:09:43 UTC; 2h 1min ago
      Until: Mon 2026-02-23 08:09:43 UTC; 2h 1min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-devices-virtual-net-ovs\x2dsystem.device - /sys/devices/virtual/net/ovs-system
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:15:04 UTC; 2h 55min ago
      Until: Mon 2026-02-23 07:15:04 UTC; 2h 55min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-devices-virtual-net-vlan20.device - /sys/devices/virtual/net/vlan20
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:15:16 UTC; 2h 55min ago
      Until: Mon 2026-02-23 07:15:16 UTC; 2h 55min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-devices-virtual-net-vlan21.device - /sys/devices/virtual/net/vlan21
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:15:19 UTC; 2h 55min ago
      Until: Mon 2026-02-23 07:15:19 UTC; 2h 55min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-devices-virtual-net-vlan22.device - /sys/devices/virtual/net/vlan22
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:15:23 UTC; 2h 55min ago
      Until: Mon 2026-02-23 07:15:23 UTC; 2h 55min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-devices-virtual-net-vlan23.device - /sys/devices/virtual/net/vlan23
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:15:13 UTC; 2h 55min ago
      Until: Mon 2026-02-23 07:15:13 UTC; 2h 55min ago
     Device: /sys/devices/virtual/net/vlan23

● sys-devices-virtual-net-vlan44.device - /sys/devices/virtual/net/vlan44
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:15:09 UTC; 2h 55min ago
      Until: Mon 2026-02-23 07:15:09 UTC; 2h 55min ago
     Device: /sys/devices/virtual/net/vlan44

● sys-module-configfs.device - /sys/module/configfs
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/module/configfs

● sys-module-fuse.device - /sys/module/fuse
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/module/fuse

● sys-subsystem-net-devices-br\x2dex.device - /sys/subsystem/net/devices/br-ex
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:15:04 UTC; 2h 55min ago
      Until: Mon 2026-02-23 07:15:04 UTC; 2h 55min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-subsystem-net-devices-br\x2dint.device - /sys/subsystem/net/devices/br-int
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 08:09:43 UTC; 2h 1min ago
      Until: Mon 2026-02-23 08:09:43 UTC; 2h 1min ago
     Device: /sys/devices/virtual/net/br-int

● sys-subsystem-net-devices-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-subsystem-net-devices-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 06:42:59 UTC; 3h 28min ago
      Until: Mon 2026-02-23 06:42:59 UTC; 3h 28min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-subsystem-net-devices-genev_sys_6081.device - /sys/subsystem/net/devices/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 08:09:43 UTC; 2h 1min ago
      Until: Mon 2026-02-23 08:09:43 UTC; 2h 1min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-subsystem-net-devices-ovs\x2dsystem.device - /sys/subsystem/net/devices/ovs-system
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:15:04 UTC; 2h 55min ago
      Until: Mon 2026-02-23 07:15:04 UTC; 2h 55min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-subsystem-net-devices-vlan20.device - /sys/subsystem/net/devices/vlan20
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:15:16 UTC; 2h 55min ago
      Until: Mon 2026-02-23 07:15:16 UTC; 2h 55min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-subsystem-net-devices-vlan21.device - /sys/subsystem/net/devices/vlan21
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:15:19 UTC; 2h 55min ago
      Until: Mon 2026-02-23 07:15:19 UTC; 2h 55min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-subsystem-net-devices-vlan22.device - /sys/subsystem/net/devices/vlan22
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:15:23 UTC; 2h 55min ago
      Until: Mon 2026-02-23 07:15:23 UTC; 2h 55min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-subsystem-net-devices-vlan23.device - /sys/subsystem/net/devices/vlan23
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:15:13 UTC; 2h 55min ago
      Until: Mon 2026-02-23 07:15:13 UTC; 2h 55min ago
     Device: /sys/devices/virtual/net/vlan23

● sys-subsystem-net-devices-vlan44.device - /sys/subsystem/net/devices/vlan44
     Loaded: loaded
     Active: active (plugged) since Mon 2026-02-23 07:15:09 UTC; 2h 55min ago
      Until: Mon 2026-02-23 07:15:09 UTC; 2h 55min ago
     Device: /sys/devices/virtual/net/vlan44

● -.mount - Root Mount
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted)
      Where: /
       What: /dev/vda4
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● boot-efi.mount - /boot/efi
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
      Where: /boot/efi
       What: /dev/vda2
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)
         IO: 16.5K read, 512B written
      Tasks: 0 (limit: 100220)
     Memory: 32.0K
        CPU: 3ms
     CGroup: /system.slice/boot-efi.mount

Feb 23 06:36:04 localhost systemd[1]: Mounting /boot/efi...
Feb 23 06:36:04 localhost systemd[1]: Mounted /boot/efi.

● boot.mount - /boot
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Where: /boot
       What: /dev/vda3
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)
         IO: 347.5K read, 2.0M written
      Tasks: 0 (limit: 100220)
     Memory: 4.0K
        CPU: 20ms
     CGroup: /system.slice/boot.mount

Feb 23 06:36:03 localhost systemd[1]: Mounting /boot...
Feb 23 06:36:03 localhost systemd[1]: Mounted /boot.

● dev-hugepages.mount - Huge Pages File System
     Loaded: loaded (/usr/lib/systemd/system/dev-hugepages.mount; static)
Unit home.mount could not be found.
     Active: active (mounted) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Where: /dev/hugepages
       What: hugetlbfs
       Docs: https://docs.kernel.org/admin-guide/mm/hugetlbpage.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 48.0K read, 0B written
      Tasks: 0 (limit: 100220)
     Memory: 52.0K
        CPU: 7ms
     CGroup: /dev-hugepages.mount

Feb 23 06:36:02 localhost systemd[1]: Mounted Huge Pages File System.

● dev-hugepages1G.mount - /dev/hugepages1G
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Mon 2026-02-23 09:15:59 UTC; 55min ago
      Until: Mon 2026-02-23 09:15:59 UTC; 55min ago
      Where: /dev/hugepages1G
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages2M.mount - /dev/hugepages2M
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Mon 2026-02-23 09:16:00 UTC; 55min ago
      Until: Mon 2026-02-23 09:16:00 UTC; 55min ago
      Where: /dev/hugepages2M
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-mqueue.mount - POSIX Message Queue File System
     Loaded: loaded (/usr/lib/systemd/system/dev-mqueue.mount; static)
     Active: active (mounted) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Where: /dev/mqueue
       What: mqueue
       Docs: man:mq_overview(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 100220)
     Memory: 4.0K
        CPU: 3ms
     CGroup: /dev-mqueue.mount

Feb 23 06:36:02 localhost systemd[1]: Mounted POSIX Message Queue File System.

● efi.mount - EFI System Partition Automount
     Loaded: loaded (/run/systemd/generator.late/efi.mount; generated)
     Active: active (mounted) since Mon 2026-02-23 10:10:27 UTC; 35s ago
      Until: Mon 2026-02-23 10:10:27 UTC; 35s ago
TriggeredBy: ● efi.automount
      Where: /efi
       What: /dev/vda2
       Docs: man:systemd-gpt-auto-generator(8)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 100220)
     Memory: 8.0K
        CPU: 7ms
     CGroup: /system.slice/efi.mount

Feb 23 10:10:27 np0005626466.localdomain systemd[1]: Mounting EFI System Partition Automount...
Feb 23 10:10:27 np0005626466.localdomain systemd[1]: Mounted EFI System Partition Automount.

○ proc-fs-nfsd.mount - NFSD configuration filesystem
     Loaded: loaded (/usr/lib/systemd/system/proc-fs-nfsd.mount; static)
     Active: inactive (dead)
      Where: /proc/fs/nfsd
       What: nfsd

● proc-sys-fs-binfmt_misc.mount - Arbitrary Executable File Formats File System
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.mount; disabled; preset: disabled)
     Active: active (mounted) since Mon 2026-02-23 07:37:40 UTC; 2h 33min ago
      Until: Mon 2026-02-23 07:37:40 UTC; 2h 33min ago
TriggeredBy: ● proc-sys-fs-binfmt_misc.automount
      Where: /proc/sys/fs/binfmt_misc
       What: binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 100220)
     Memory: 8.0K
        CPU: 8ms
     CGroup: /proc-sys-fs-binfmt_misc.mount

Feb 23 07:37:40 np0005626466.localdomain systemd[1]: Mounting Arbitrary Executable File Formats File System...
Feb 23 07:37:40 np0005626466.localdomain systemd[1]: Mounted Arbitrary Executable File Formats File System.

● run-credentials-systemd\x2dsysctl.service.mount - /run/credentials/systemd-sysctl.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:10:59 UTC; 1h 0min ago
      Until: Mon 2026-02-23 09:10:59 UTC; 1h 0min ago
      Where: /run/credentials/systemd-sysctl.service
       What: none

● run-credentials-systemd\x2dsysusers.service.mount - /run/credentials/systemd-sysusers.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Where: /run/credentials/systemd-sysusers.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup.service.mount - /run/credentials/systemd-tmpfiles-setup.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
      Where: /run/credentials/systemd-tmpfiles-setup.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup\x2ddev.service.mount - /run/credentials/systemd-tmpfiles-setup-dev.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 34min ago
      Where: /run/credentials/systemd-tmpfiles-setup-dev.service
       What: none

● run-netns-qdhcp\x2d91fdc6a7\x2db901\x2d4255\x2d83f7\x2d4b37365658a3.mount - /run/netns/qdhcp-91fdc6a7-b901-4255-83f7-4b37365658a3
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:35:31 UTC; 35min ago
      Until: Mon 2026-02-23 09:35:31 UTC; 35min ago
      Where: /run/netns/qdhcp-91fdc6a7-b901-4255-83f7-4b37365658a3
       What: nsfs

● run-netns-qdhcp\x2d9da5b53d\x2d3184\x2d450f\x2d9a5b\x2dbdba1a6c9f6d.mount - /run/netns/qdhcp-9da5b53d-3184-450f-9a5b-bdba1a6c9f6d
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:35:31 UTC; 35min ago
      Until: Mon 2026-02-23 09:35:31 UTC; 35min ago
      Where: /run/netns/qdhcp-9da5b53d-3184-450f-9a5b-bdba1a6c9f6d
       What: nsfs

● run-netns.mount - /run/netns
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 07:49:09 UTC; 2h 21min ago
      Until: Mon 2026-02-23 07:49:09 UTC; 2h 21min ago
      Where: /run/netns
       What: tmpfs

● run-user-1000.mount - /run/user/1000
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 06:36:32 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:32 UTC; 3h 34min ago
      Where: /run/user/1000
       What: tmpfs

● run-user-1002.mount - /run/user/1002
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 07:37:14 UTC; 2h 33min ago
      Until: Mon 2026-02-23 07:37:14 UTC; 2h 33min ago
      Where: /run/user/1002
       What: tmpfs

● sys-fs-fuse-connections.mount - FUSE Control File System
     Loaded: loaded (/usr/lib/systemd/system/sys-fs-fuse-connections.mount; static)
     Active: active (mounted) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Where: /sys/fs/fuse/connections
       What: fusectl
       Docs: https://docs.kernel.org/filesystems/fuse.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 100220)
     Memory: 4.0K
        CPU: 2ms
     CGroup: /sys-fs-fuse-connections.mount

Feb 23 06:36:02 localhost systemd[1]: Mounting FUSE Control File System...
Feb 23 06:36:02 localhost systemd[1]: Mounted FUSE Control File System.

● sys-kernel-config.mount - Kernel Configuration File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-config.mount; static)
     Active: active (mounted) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Where: /sys/kernel/config
       What: configfs
       Docs: https://docs.kernel.org/filesystems/configfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 100220)
     Memory: 4.0K
        CPU: 4ms
     CGroup: /sys-kernel-config.mount

Feb 23 06:36:02 localhost systemd[1]: Mounting Kernel Configuration File System...
Feb 23 06:36:02 localhost systemd[1]: Mounted Kernel Configuration File System.

Unit sysroot.mount could not be found.
● sys-kernel-debug-tracing.mount - /sys/kernel/debug/tracing
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 10:10:25 UTC; 37s ago
      Until: Mon 2026-02-23 10:10:25 UTC; 37s ago
      Where: /sys/kernel/debug/tracing
       What: tracefs

● sys-kernel-debug.mount - Kernel Debug File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-debug.mount; static)
     Active: active (mounted) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Where: /sys/kernel/debug
       What: debugfs
       Docs: https://docs.kernel.org/filesystems/debugfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 100220)
     Memory: 4.0K
        CPU: 2ms
     CGroup: /sys-kernel-debug.mount

Feb 23 06:36:02 localhost systemd[1]: Mounted Kernel Debug File System.

● sys-kernel-tracing.mount - Kernel Trace File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-tracing.mount; static)
     Active: active (mounted) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Where: /sys/kernel/tracing
       What: tracefs
       Docs: https://docs.kernel.org/trace/ftrace.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 100220)
     Memory: 4.0K
        CPU: 2ms
     CGroup: /sys-kernel-tracing.mount

Feb 23 06:36:02 localhost systemd[1]: Mounted Kernel Trace File System.

○ tmp.mount - Temporary Directory /tmp
     Loaded: loaded (/usr/lib/systemd/system/tmp.mount; disabled; preset: disabled)
     Active: inactive (dead)
      Where: /tmp
       What: tmpfs
       Docs: https://systemd.io/TEMPORARY_DIRECTORIES
             man:file-hierarchy(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● var-lib-containers-storage-overlay-03da4e7bac3b1b91f7a766e2d36283752572b59f314a30646afa3b1c1cad918f-merged.mount - /var/lib/containers/storage/overlay/03da4e7bac3b1b91f7a766e2d36283752572b59f314a30646afa3b1c1cad918f/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:18:38 UTC; 52min ago
      Until: Mon 2026-02-23 09:18:38 UTC; 52min ago
      Where: /var/lib/containers/storage/overlay/03da4e7bac3b1b91f7a766e2d36283752572b59f314a30646afa3b1c1cad918f/merged
       What: overlay

● var-lib-containers-storage-overlay-0723d764ac16cda264c57c82cdfb768258a274589b7f042358e003daa402fc4d-merged.mount - /var/lib/containers/storage/overlay/0723d764ac16cda264c57c82cdfb768258a274589b7f042358e003daa402fc4d/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:35:34 UTC; 35min ago
      Until: Mon 2026-02-23 09:35:34 UTC; 35min ago
      Where: /var/lib/containers/storage/overlay/0723d764ac16cda264c57c82cdfb768258a274589b7f042358e003daa402fc4d/merged
       What: overlay

● var-lib-containers-storage-overlay-16eb224acb883e5119671587471a1f65be094ea58a891062166b28cbbc9e912c-merged.mount - /var/lib/containers/storage/overlay/16eb224acb883e5119671587471a1f65be094ea58a891062166b28cbbc9e912c/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:35:34 UTC; 35min ago
      Until: Mon 2026-02-23 09:35:34 UTC; 35min ago
      Where: /var/lib/containers/storage/overlay/16eb224acb883e5119671587471a1f65be094ea58a891062166b28cbbc9e912c/merged
       What: overlay

● var-lib-containers-storage-overlay-21b3f45e441ca65700d5c8d5d9b5aa4012386ffdc6cbc584d071f015462c2c64-merged.mount - /var/lib/containers/storage/overlay/21b3f45e441ca65700d5c8d5d9b5aa4012386ffdc6cbc584d071f015462c2c64/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 07:39:03 UTC; 2h 31min ago
      Until: Mon 2026-02-23 07:39:03 UTC; 2h 31min ago
      Where: /var/lib/containers/storage/overlay/21b3f45e441ca65700d5c8d5d9b5aa4012386ffdc6cbc584d071f015462c2c64/merged
       What: overlay

● var-lib-containers-storage-overlay-2fde108237d6386031821ac3e8b41b1d17c9fce91e0bbb852aed2be2124c6f41-merged.mount - /var/lib/containers/storage/overlay/2fde108237d6386031821ac3e8b41b1d17c9fce91e0bbb852aed2be2124c6f41/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 07:38:41 UTC; 2h 32min ago
      Until: Mon 2026-02-23 07:38:41 UTC; 2h 32min ago
      Where: /var/lib/containers/storage/overlay/2fde108237d6386031821ac3e8b41b1d17c9fce91e0bbb852aed2be2124c6f41/merged
       What: overlay

● var-lib-containers-storage-overlay-4c054d86402541ed56028475d36c32f8df8aefd09f7e31993dd2c4480ecf3ae1-merged.mount - /var/lib/containers/storage/overlay/4c054d86402541ed56028475d36c32f8df8aefd09f7e31993dd2c4480ecf3ae1/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:35:21 UTC; 35min ago
      Until: Mon 2026-02-23 09:35:21 UTC; 35min ago
      Where: /var/lib/containers/storage/overlay/4c054d86402541ed56028475d36c32f8df8aefd09f7e31993dd2c4480ecf3ae1/merged
       What: overlay

● var-lib-containers-storage-overlay-5624bd7f85f18f1b2397de3d6f239bd8c1cf3dfa76951d724c33876b7a79bd56-merged.mount - /var/lib/containers/storage/overlay/5624bd7f85f18f1b2397de3d6f239bd8c1cf3dfa76951d724c33876b7a79bd56/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:49:26 UTC; 21min ago
      Until: Mon 2026-02-23 09:49:26 UTC; 21min ago
      Where: /var/lib/containers/storage/overlay/5624bd7f85f18f1b2397de3d6f239bd8c1cf3dfa76951d724c33876b7a79bd56/merged
       What: overlay

● var-lib-containers-storage-overlay-63a03ac14add04ead062b75062282f35d2d69bca05d9ca5d66397f88286dd336-merged.mount - /var/lib/containers/storage/overlay/63a03ac14add04ead062b75062282f35d2d69bca05d9ca5d66397f88286dd336/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:29:13 UTC; 41min ago
      Until: Mon 2026-02-23 09:29:13 UTC; 41min ago
      Where: /var/lib/containers/storage/overlay/63a03ac14add04ead062b75062282f35d2d69bca05d9ca5d66397f88286dd336/merged
       What: overlay

● var-lib-containers-storage-overlay-76da973e4ed6a023e92c21019b2771a4ba50f937b3fde85d80717cdb08a253d5-merged.mount - /var/lib/containers/storage/overlay/76da973e4ed6a023e92c21019b2771a4ba50f937b3fde85d80717cdb08a253d5/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:38:47 UTC; 32min ago
      Until: Mon 2026-02-23 09:38:47 UTC; 32min ago
      Where: /var/lib/containers/storage/overlay/76da973e4ed6a023e92c21019b2771a4ba50f937b3fde85d80717cdb08a253d5/merged
       What: overlay

● var-lib-containers-storage-overlay-7f10180303320b679a160a98445a129e0021e711322442927269a0d33f5c7f04-merged.mount - /var/lib/containers/storage/overlay/7f10180303320b679a160a98445a129e0021e711322442927269a0d33f5c7f04/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:30:19 UTC; 40min ago
      Until: Mon 2026-02-23 09:30:19 UTC; 40min ago
      Where: /var/lib/containers/storage/overlay/7f10180303320b679a160a98445a129e0021e711322442927269a0d33f5c7f04/merged
       What: overlay

● var-lib-containers-storage-overlay-8751a1680802843339166f3c81487be2f267f75de6c9529731c2593cf4c88b08-merged.mount - /var/lib/containers/storage/overlay/8751a1680802843339166f3c81487be2f267f75de6c9529731c2593cf4c88b08/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:28:53 UTC; 42min ago
      Until: Mon 2026-02-23 09:28:53 UTC; 42min ago
      Where: /var/lib/containers/storage/overlay/8751a1680802843339166f3c81487be2f267f75de6c9529731c2593cf4c88b08/merged
       What: overlay

● var-lib-containers-storage-overlay-8aa38fb9bdef7ce260b5fe4e223eb0d23c35c3063466ac1ecf8bd1d1270fc876-merged.mount - /var/lib/containers/storage/overlay/8aa38fb9bdef7ce260b5fe4e223eb0d23c35c3063466ac1ecf8bd1d1270fc876/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:34:20 UTC; 36min ago
      Until: Mon 2026-02-23 09:34:20 UTC; 36min ago
      Where: /var/lib/containers/storage/overlay/8aa38fb9bdef7ce260b5fe4e223eb0d23c35c3063466ac1ecf8bd1d1270fc876/merged
       What: overlay

● var-lib-containers-storage-overlay-a0f5834975de1f9574fd39168112d8b0c6d90d33cecf74003cf7fc123733fe13-merged.mount - /var/lib/containers/storage/overlay/a0f5834975de1f9574fd39168112d8b0c6d90d33cecf74003cf7fc123733fe13/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:44:17 UTC; 26min ago
      Until: Mon 2026-02-23 09:44:17 UTC; 26min ago
      Where: /var/lib/containers/storage/overlay/a0f5834975de1f9574fd39168112d8b0c6d90d33cecf74003cf7fc123733fe13/merged
       What: overlay

● var-lib-containers-storage-overlay-bdfb5c8731e3a32ca41b2a5b84d8f47b2954393205388ab47e22f0bae15959ca-merged.mount - /var/lib/containers/storage/overlay/bdfb5c8731e3a32ca41b2a5b84d8f47b2954393205388ab47e22f0bae15959ca/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 07:38:59 UTC; 2h 32min ago
      Until: Mon 2026-02-23 07:38:59 UTC; 2h 32min ago
      Where: /var/lib/containers/storage/overlay/bdfb5c8731e3a32ca41b2a5b84d8f47b2954393205388ab47e22f0bae15959ca/merged
       What: overlay

● var-lib-containers-storage-overlay-befa5f746e547ab43ae2e99190afcb18b9fcb9604ee80aa4efb51f9758b82760-merged.mount - /var/lib/containers/storage/overlay/befa5f746e547ab43ae2e99190afcb18b9fcb9604ee80aa4efb51f9758b82760/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:29:34 UTC; 41min ago
      Until: Mon 2026-02-23 09:29:34 UTC; 41min ago
      Where: /var/lib/containers/storage/overlay/befa5f746e547ab43ae2e99190afcb18b9fcb9604ee80aa4efb51f9758b82760/merged
       What: overlay

● var-lib-containers-storage-overlay-ccedf90b2ee5dd8d7269385f84046989df2e497ec922185139a6e3ad8f794ab7-merged.mount - /var/lib/containers/storage/overlay/ccedf90b2ee5dd8d7269385f84046989df2e497ec922185139a6e3ad8f794ab7/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:19:48 UTC; 51min ago
      Until: Mon 2026-02-23 09:19:48 UTC; 51min ago
      Where: /var/lib/containers/storage/overlay/ccedf90b2ee5dd8d7269385f84046989df2e497ec922185139a6e3ad8f794ab7/merged
       What: overlay

● var-lib-containers-storage-overlay-eb69f2bae35d57da791b3d4a8c1a236ba4551f0ccbb1a9b4b70c113ce1af22dc-merged.mount - /var/lib/containers/storage/overlay/eb69f2bae35d57da791b3d4a8c1a236ba4551f0ccbb1a9b4b70c113ce1af22dc/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:42:45 UTC; 28min ago
      Until: Mon 2026-02-23 09:42:45 UTC; 28min ago
      Where: /var/lib/containers/storage/overlay/eb69f2bae35d57da791b3d4a8c1a236ba4551f0ccbb1a9b4b70c113ce1af22dc/merged
       What: overlay

● var-lib-containers-storage-overlay.mount - /var/lib/containers/storage/overlay
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 07:38:41 UTC; 2h 32min ago
      Until: Mon 2026-02-23 07:38:41 UTC; 2h 32min ago
      Where: /var/lib/containers/storage/overlay
       What: /dev/vda4

● var-lib-containers-storage-overlay\x2dcontainers-122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:18:38 UTC; 52min ago
      Until: Mon 2026-02-23 09:18:38 UTC; 52min ago
      Where: /var/lib/containers/storage/overlay-containers/122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:38:47 UTC; 32min ago
      Until: Mon 2026-02-23 09:38:47 UTC; 32min ago
      Where: /var/lib/containers/storage/overlay-containers/2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:34:20 UTC; 36min ago
      Until: Mon 2026-02-23 09:34:20 UTC; 36min ago
      Where: /var/lib/containers/storage/overlay-containers/3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:30:17 UTC; 40min ago
      Until: Mon 2026-02-23 09:30:17 UTC; 40min ago
      Where: /var/lib/containers/storage/overlay-containers/6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:35:34 UTC; 35min ago
      Until: Mon 2026-02-23 09:35:34 UTC; 35min ago
      Where: /var/lib/containers/storage/overlay-containers/8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:19:48 UTC; 51min ago
      Until: Mon 2026-02-23 09:19:48 UTC; 51min ago
      Where: /var/lib/containers/storage/overlay-containers/8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:35:34 UTC; 35min ago
      Until: Mon 2026-02-23 09:35:34 UTC; 35min ago
      Where: /var/lib/containers/storage/overlay-containers/a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:35:27 UTC; 35min ago
      Until: Mon 2026-02-23 09:35:27 UTC; 35min ago
      Where: /var/lib/containers/storage/overlay-containers/bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:29:34 UTC; 41min ago
      Until: Mon 2026-02-23 09:29:34 UTC; 41min ago
      Where: /var/lib/containers/storage/overlay-containers/c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:29:13 UTC; 41min ago
      Until: Mon 2026-02-23 09:29:13 UTC; 41min ago
      Where: /var/lib/containers/storage/overlay-containers/cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Mon 2026-02-23 09:28:53 UTC; 42min ago
      Until: Mon 2026-02-23 09:28:53 UTC; 42min ago
      Where: /var/lib/containers/storage/overlay-containers/fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9/userdata/shm
       What: shm

○ var-lib-machines.mount - Virtual Machine and Container Storage (Compatibility)
     Loaded: loaded (/usr/lib/systemd/system/var-lib-machines.mount; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-23 09:24:08 UTC; 46min ago
      Where: /var/lib/machines
       What: /var/lib/machines.raw

Feb 23 09:24:08 np0005626466.localdomain systemd[1]: Virtual Machine and Container Storage (Compatibility) was skipped because of an unmet condition check (ConditionPathExists=/var/lib/machines.raw).

● var-lib-nfs-rpc_pipefs.mount - RPC Pipe File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Where: /var/lib/nfs/rpc_pipefs
       What: rpc_pipefs

● systemd-ask-password-console.path - Dispatch Password Requests to Console Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.path; static)
     Active: active (waiting) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
   Triggers: ● systemd-ask-password-console.service
       Docs: man:systemd-ask-password-console.path(8)

● systemd-ask-password-wall.path - Forward Password Requests to Wall Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.path; static)
     Active: active (waiting) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
   Triggers: ● systemd-ask-password-wall.service
       Docs: man:systemd-ask-password-wall.path(8)

● init.scope - System and Service Manager
     Loaded: loaded
  Transient: yes
     Active: active (running) since Mon 2026-02-23 06:36:00 UTC; 3h 35min ago
       Docs: man:systemd(1)

Feb 23 10:10:44 np0005626466.localdomain systemd[1]: c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8.service: Deactivated successfully.
Feb 23 10:10:55 np0005626466.localdomain systemd[1]: Started /usr/bin/podman healthcheck run 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e.
Feb 23 10:10:55 np0005626466.localdomain systemd[1]: tmp-crun.WuJvtK.mount: Deactivated successfully.
Feb 23 10:10:55 np0005626466.localdomain systemd[1]: 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e.service: Deactivated successfully.
Feb 23 10:10:56 np0005626466.localdomain systemd[1]: Started /usr/bin/podman healthcheck run 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4.
Feb 23 10:10:56 np0005626466.localdomain systemd[1]: Started /usr/bin/podman healthcheck run fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9.
Feb 23 10:10:56 np0005626466.localdomain systemd[1]: fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9.service: Deactivated successfully.
Feb 23 10:10:56 np0005626466.localdomain systemd[1]: 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4.service: Deactivated successfully.
Feb 23 10:11:02 np0005626466.localdomain systemd[1]: Started /usr/bin/podman healthcheck run 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e.
Feb 23 10:11:02 np0005626466.localdomain systemd[1]: 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e.service: Deactivated successfully.

● libpod-122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4.scope.d
             └─dep.conf
     Active: active (running) since Mon 2026-02-23 09:18:38 UTC; 52min ago
         IO: 5.7M read, 4.0K written
      Tasks: 6 (limit: 100220)
     Memory: 16.7M
        CPU: 12.081s
     CGroup: /machine.slice/libpod-122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4.scope
             └─container
               ├─156068 dumb-init --single-child -- kolla_start
               └─156071 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock

Feb 23 09:18:38 np0005626466.localdomain systemd[1]: Started libcrun container.

● libpod-2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-23 09:39:05 UTC; 31min ago
         IO: 21.2M read, 41.7M written
      Tasks: 27 (limit: 100220)
     Memory: 426.9M
        CPU: 1min 596ms
     CGroup: /machine.slice/libpod-2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79.scope
             └─container
               ├─280694 dumb-init --single-child -- kolla_start
               ├─280697 /usr/bin/python3 /usr/bin/nova-compute
               ├─307841 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmpqn5dfyp_/privsep.sock
               └─324666 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmpa7k05g5b/privsep.sock

Feb 23 09:39:05 np0005626466.localdomain systemd[1]: Started libcrun container.

● libpod-3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-23 09:34:19 UTC; 36min ago
         IO: 0B read, 4.0K written
      Tasks: 2 (limit: 100220)
     Memory: 120.5M
        CPU: 5.676s
     CGroup: /machine.slice/libpod-3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb.scope
             └─container
               ├─256427 dumb-init --single-child -- kolla_start
               └─256429 "neutron-sriov-nic-agent (/usr/bin/python3 /usr/bin/neutron-sriov-nic-agent)"

Feb 23 09:34:19 np0005626466.localdomain systemd[1]: Started libcrun container.

● libpod-6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-23 09:30:19 UTC; 40min ago
         IO: 220.0K read, 0B written
      Tasks: 13 (limit: 100220)
     Memory: 21.4M
        CPU: 5.327s
     CGroup: /machine.slice/libpod-6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e.scope
             └─container
               └─243612 /app/openstack-network-exporter

Feb 23 09:30:19 np0005626466.localdomain systemd[1]: Started libcrun container.

● libpod-8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-23 09:35:34 UTC; 35min ago
         IO: 0B read, 8.0K written
      Tasks: 2 (limit: 100220)
     Memory: 1.0M
        CPU: 85ms
     CGroup: /machine.slice/libpod-8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c.scope
             └─container
               ├─264101 dumb-init --single-child -- /usr/sbin/dnsmasq -k --no-hosts --no-resolv --pid-file=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/pid --dhcp-hostsfile=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/host --addn-hosts=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/addn_hosts --dhcp-optsfile=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/opts --dhcp-leasefile=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/leases --dhcp-match=set:ipxe,175 --dhcp-userclass=set:ipxe6,iPXE --local-service --bind-dynamic --dhcp-range=set:subnet-21d77760-119f-4466-bc85-a0e9167487a9,192.168.122.0,static,255.255.255.0,86400s --dhcp-option-force=option:mtu,1350 --dhcp-lease-max=256 --conf-file=/dev/null --domain=openstacklocal
               └─264111 /usr/sbin/dnsmasq -k --no-hosts --no-resolv --pid-file=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/pid --dhcp-hostsfile=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/host --addn-hosts=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/addn_hosts --dhcp-optsfile=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/opts --dhcp-leasefile=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/leases --dhcp-match=set:ipxe,175 --dhcp-userclass=set:ipxe6,iPXE --local-service --bind-dynamic --dhcp-range=set:subnet-21d77760-119f-4466-bc85-a0e9167487a9,192.168.122.0,static,255.255.255.0,86400s --dhcp-option-force=option:mtu,1350 --dhcp-lease-max=256 --conf-file=/dev/null --domain=openstacklocal

Feb 23 10:05:41 np0005626466.localdomain dnsmasq-dhcp[264111]: read /var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/opts
Feb 23 10:05:44 np0005626466.localdomain dnsmasq[264111]: read /var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/addn_hosts - 1 addresses
Feb 23 10:05:44 np0005626466.localdomain dnsmasq-dhcp[264111]: read /var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/host
Feb 23 10:05:44 np0005626466.localdomain dnsmasq-dhcp[264111]: read /var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/opts
Feb 23 10:08:19 np0005626466.localdomain dnsmasq[264111]: read /var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/addn_hosts - 2 addresses
Feb 23 10:08:19 np0005626466.localdomain dnsmasq-dhcp[264111]: read /var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/host
Feb 23 10:08:19 np0005626466.localdomain dnsmasq-dhcp[264111]: read /var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/opts
Feb 23 10:08:27 np0005626466.localdomain dnsmasq[264111]: read /var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/addn_hosts - 1 addresses
Feb 23 10:08:27 np0005626466.localdomain dnsmasq-dhcp[264111]: read /var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/host
Feb 23 10:08:27 np0005626466.localdomain dnsmasq-dhcp[264111]: read /var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/opts

● libpod-8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e.scope.d
             └─dep.conf
     Active: active (running) since Mon 2026-02-23 09:19:48 UTC; 51min ago
         IO: 4.0K read, 9.4M written
      Tasks: 11 (limit: 100220)
     Memory: 419.2M
        CPU: 27.259s
     CGroup: /machine.slice/libpod-8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e.scope
             └─container
               ├─161941 dumb-init --single-child -- kolla_start
               ├─161944 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─162130 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─162175 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp84iwktpn/privsep.sock
               ├─264002 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpbtevuisl/privsep.sock
               └─308309 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpkfen9wub/privsep.sock

Feb 23 09:55:57 np0005626466.localdomain podman[311897]: 2026-02-23 09:55:57.280909478 +0000 UTC m=+0.109890728 container cleanup 4cd8dd42a2a8ad7191b743e7968feb20891edac5f3e61b8ebf3d0672a801d480 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-488344bb-b2b1-4b3f-933b-1a9bfdff1d5c, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.43.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=8419493e1fd846703d277695e03fc5eb, org.label-schema.build-date=20260216, tcib_managed=true, org.label-schema.license=GPLv2)
Feb 23 09:55:57 np0005626466.localdomain podman[311925]: 2026-02-23 09:55:57.373503298 +0000 UTC m=+0.075419447 container remove 4cd8dd42a2a8ad7191b743e7968feb20891edac5f3e61b8ebf3d0672a801d480 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-488344bb-b2b1-4b3f-933b-1a9bfdff1d5c, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=8419493e1fd846703d277695e03fc5eb, io.buildah.version=1.43.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260216, tcib_managed=true)
Feb 23 09:59:31 np0005626466.localdomain podman[323221]: 
Feb 23 09:59:31 np0005626466.localdomain podman[323221]: 2026-02-23 09:59:31.046421505 +0000 UTC m=+0.093305617 container create 5b24afd5d2cf2014565b6c34ffc0b3a9254ef622427462c9f547bc7f258176d8 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-ff7aa220-5765-44c6-9121-cfbd718241c5, org.label-schema.build-date=20260216, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=8419493e1fd846703d277695e03fc5eb, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, io.buildah.version=1.43.0, org.label-schema.vendor=CentOS)
Feb 23 09:59:31 np0005626466.localdomain podman[323221]: 2026-02-23 09:59:31.00569869 +0000 UTC m=+0.052582802 image pull  quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified
Feb 23 09:59:31 np0005626466.localdomain podman[323221]: 2026-02-23 09:59:31.109203282 +0000 UTC m=+0.156087404 container init 5b24afd5d2cf2014565b6c34ffc0b3a9254ef622427462c9f547bc7f258176d8 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-ff7aa220-5765-44c6-9121-cfbd718241c5, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.43.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260216, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=8419493e1fd846703d277695e03fc5eb, tcib_managed=true)
Feb 23 09:59:31 np0005626466.localdomain podman[323221]: 2026-02-23 09:59:31.118857152 +0000 UTC m=+0.165741274 container start 5b24afd5d2cf2014565b6c34ffc0b3a9254ef622427462c9f547bc7f258176d8 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-ff7aa220-5765-44c6-9121-cfbd718241c5, io.buildah.version=1.43.0, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260216, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=8419493e1fd846703d277695e03fc5eb)
Feb 23 10:00:18 np0005626466.localdomain podman[325489]: 2026-02-23 10:00:18.054190912 +0000 UTC m=+0.066043071 container died 5b24afd5d2cf2014565b6c34ffc0b3a9254ef622427462c9f547bc7f258176d8 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-ff7aa220-5765-44c6-9121-cfbd718241c5, io.buildah.version=1.43.0, org.label-schema.build-date=20260216, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, tcib_build_tag=8419493e1fd846703d277695e03fc5eb)
Feb 23 10:00:18 np0005626466.localdomain podman[325489]: 2026-02-23 10:00:18.099952051 +0000 UTC m=+0.111804180 container cleanup 5b24afd5d2cf2014565b6c34ffc0b3a9254ef622427462c9f547bc7f258176d8 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-ff7aa220-5765-44c6-9121-cfbd718241c5, tcib_build_tag=8419493e1fd846703d277695e03fc5eb, maintainer=OpenStack Kubernetes Operator team, io.buildah.version=1.43.0, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.build-date=20260216)
Feb 23 10:00:18 np0005626466.localdomain podman[325520]: 2026-02-23 10:00:18.182979368 +0000 UTC m=+0.066816375 container remove 5b24afd5d2cf2014565b6c34ffc0b3a9254ef622427462c9f547bc7f258176d8 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-ff7aa220-5765-44c6-9121-cfbd718241c5, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.43.0, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_build_tag=8419493e1fd846703d277695e03fc5eb, org.label-schema.build-date=20260216, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2)

● libpod-a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-23 09:35:34 UTC; 35min ago
         IO: 0B read, 6.5K written
      Tasks: 2 (limit: 100220)
     Memory: 1.0M
        CPU: 16ms
     CGroup: /machine.slice/libpod-a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41.scope
             └─container
               ├─264098 dumb-init --single-child -- /usr/sbin/dnsmasq -k --no-hosts --no-resolv --pid-file=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/pid --dhcp-hostsfile=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/host --addn-hosts=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/addn_hosts --dhcp-optsfile=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/opts --dhcp-leasefile=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/leases --dhcp-match=set:ipxe,175 --dhcp-userclass=set:ipxe6,iPXE --local-service --bind-dynamic --dhcp-range=set:subnet-33e5fe9f-f246-45b2-86ae-3c10c0318cd7,192.168.0.0,static,255.255.255.0,86400s --dhcp-option-force=option:mtu,1292 --dhcp-lease-max=256 --conf-file=/dev/null --domain=openstacklocal
               └─264104 /usr/sbin/dnsmasq -k --no-hosts --no-resolv --pid-file=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/pid --dhcp-hostsfile=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/host --addn-hosts=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/addn_hosts --dhcp-optsfile=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/opts --dhcp-leasefile=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/leases --dhcp-match=set:ipxe,175 --dhcp-userclass=set:ipxe6,iPXE --local-service --bind-dynamic --dhcp-range=set:subnet-33e5fe9f-f246-45b2-86ae-3c10c0318cd7,192.168.0.0,static,255.255.255.0,86400s --dhcp-option-force=option:mtu,1292 --dhcp-lease-max=256 --conf-file=/dev/null --domain=openstacklocal

Feb 23 09:35:34 np0005626466.localdomain systemd[1]: Started libcrun container.
Feb 23 09:35:34 np0005626466.localdomain dnsmasq[264104]: started, version 2.85 cachesize 150
Feb 23 09:35:34 np0005626466.localdomain dnsmasq[264104]: DNS service limited to local subnets
Feb 23 09:35:34 np0005626466.localdomain dnsmasq[264104]: compile time options: IPv6 GNU-getopt DBus no-UBus no-i18n IDN2 DHCP DHCPv6 no-Lua TFTP no-conntrack ipset auth cryptohash DNSSEC loop-detect inotify dumpfile
Feb 23 09:35:34 np0005626466.localdomain dnsmasq[264104]: warning: no upstream servers configured
Feb 23 09:35:34 np0005626466.localdomain dnsmasq-dhcp[264104]: DHCP, static leases only on 192.168.0.0, lease time 1d
Feb 23 09:35:34 np0005626466.localdomain dnsmasq[264104]: read /var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/addn_hosts - 2 addresses
Feb 23 09:35:34 np0005626466.localdomain dnsmasq-dhcp[264104]: read /var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/host
Feb 23 09:35:34 np0005626466.localdomain dnsmasq-dhcp[264104]: read /var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/opts

● libpod-bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-23 09:35:26 UTC; 35min ago
         IO: 0B read, 22.6M written
      Tasks: 15 (limit: 100220)
     Memory: 469.8M
        CPU: 3min 2.951s
     CGroup: /machine.slice/libpod-bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d.scope
             └─container
               ├─263821 dumb-init --single-child -- kolla_start
               ├─263823 "neutron-dhcp-agent (/usr/bin/python3 /usr/bin/neutron-dhcp-agent)"
               ├─263857 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpg0yua8_o/privsep.sock
               ├─263887 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp7urwfrmj/privsep.sock
               ├─263903 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpopxbn0rr/privsep.sock
               └─315526 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.dhcp_release_cmd --privsep_sock_path /tmp/tmp02asih0a/privsep.sock

Feb 23 10:04:59 np0005626466.localdomain podman[328828]: 2026-02-23 10:04:59.151245396 +0000 UTC m=+0.061588294 container kill 8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c (image=quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified, name=neutron-dnsmasq-qdhcp-91fdc6a7-b901-4255-83f7-4b37365658a3, tcib_build_tag=8419493e1fd846703d277695e03fc5eb, tcib_managed=true, io.buildah.version=1.43.0, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260216, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS)
Feb 23 10:05:04 np0005626466.localdomain podman[328866]: 2026-02-23 10:05:04.898369674 +0000 UTC m=+0.060093177 container kill 8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c (image=quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified, name=neutron-dnsmasq-qdhcp-91fdc6a7-b901-4255-83f7-4b37365658a3, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, io.buildah.version=1.43.0, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260216, org.label-schema.vendor=CentOS, tcib_build_tag=8419493e1fd846703d277695e03fc5eb)
Feb 23 10:05:12 np0005626466.localdomain podman[328948]: 2026-02-23 10:05:12.589026034 +0000 UTC m=+0.060783688 container kill 8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c (image=quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified, name=neutron-dnsmasq-qdhcp-91fdc6a7-b901-4255-83f7-4b37365658a3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260216, tcib_build_tag=8419493e1fd846703d277695e03fc5eb, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.43.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0)
Feb 23 10:05:15 np0005626466.localdomain podman[328988]: 2026-02-23 10:05:15.547961075 +0000 UTC m=+0.058231120 container kill 8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c (image=quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified, name=neutron-dnsmasq-qdhcp-91fdc6a7-b901-4255-83f7-4b37365658a3, io.buildah.version=1.43.0, org.label-schema.vendor=CentOS, tcib_managed=true, org.label-schema.license=GPLv2, tcib_build_tag=8419493e1fd846703d277695e03fc5eb, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260216, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0)
Feb 23 10:05:21 np0005626466.localdomain podman[329086]: 2026-02-23 10:05:21.480028508 +0000 UTC m=+0.057356942 container kill 8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c (image=quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified, name=neutron-dnsmasq-qdhcp-91fdc6a7-b901-4255-83f7-4b37365658a3, tcib_build_tag=8419493e1fd846703d277695e03fc5eb, tcib_managed=true, org.label-schema.build-date=20260216, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.43.0, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team)
Feb 23 10:05:29 np0005626466.localdomain podman[329162]: 2026-02-23 10:05:29.300167652 +0000 UTC m=+0.070134999 container kill 8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c (image=quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified, name=neutron-dnsmasq-qdhcp-91fdc6a7-b901-4255-83f7-4b37365658a3, org.label-schema.vendor=CentOS, tcib_build_tag=8419493e1fd846703d277695e03fc5eb, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_managed=true, io.buildah.version=1.43.0, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260216, org.label-schema.name=CentOS Stream 9 Base Image)
Feb 23 10:05:41 np0005626466.localdomain podman[329361]: 2026-02-23 10:05:41.146934911 +0000 UTC m=+0.059671675 container kill 8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c (image=quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified, name=neutron-dnsmasq-qdhcp-91fdc6a7-b901-4255-83f7-4b37365658a3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260216, tcib_build_tag=8419493e1fd846703d277695e03fc5eb, org.label-schema.schema-version=1.0, io.buildah.version=1.43.0, org.label-schema.vendor=CentOS, tcib_managed=true)
Feb 23 10:05:44 np0005626466.localdomain podman[329400]: 2026-02-23 10:05:44.255256521 +0000 UTC m=+0.071507601 container kill 8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c (image=quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified, name=neutron-dnsmasq-qdhcp-91fdc6a7-b901-4255-83f7-4b37365658a3, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, tcib_build_tag=8419493e1fd846703d277695e03fc5eb, org.label-schema.vendor=CentOS, tcib_managed=true, io.buildah.version=1.43.0, org.label-schema.build-date=20260216)
Feb 23 10:08:19 np0005626466.localdomain podman[330360]: 2026-02-23 10:08:19.166275822 +0000 UTC m=+0.060382597 container kill 8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c (image=quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified, name=neutron-dnsmasq-qdhcp-91fdc6a7-b901-4255-83f7-4b37365658a3, org.label-schema.vendor=CentOS, tcib_build_tag=8419493e1fd846703d277695e03fc5eb, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, tcib_managed=true, io.buildah.version=1.43.0, org.label-schema.build-date=20260216, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team)
Feb 23 10:08:27 np0005626466.localdomain podman[330463]: 2026-02-23 10:08:27.045885553 +0000 UTC m=+0.070316515 container kill 8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c (image=quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified, name=neutron-dnsmasq-qdhcp-91fdc6a7-b901-4255-83f7-4b37365658a3, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260216, org.label-schema.license=GPLv2, io.buildah.version=1.43.0, org.label-schema.vendor=CentOS, tcib_build_tag=8419493e1fd846703d277695e03fc5eb, org.label-schema.schema-version=1.0)

● libpod-c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-23 09:29:34 UTC; 41min ago
         IO: 416.0K read, 0B written
      Tasks: 10 (limit: 100220)
     Memory: 13.2M
        CPU: 4.502s
     CGroup: /machine.slice/libpod-c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8.scope
             └─container
               └─241166 /bin/podman_exporter

Feb 23 09:29:34 np0005626466.localdomain systemd[1]: Started libcrun container.

● libpod-cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-23 09:29:13 UTC; 41min ago
         IO: 0B read, 0B written
      Tasks: 5 (limit: 100220)
     Memory: 19.9M
        CPU: 5.134s
     CGroup: /machine.slice/libpod-cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52.scope
             └─container
               └─238755 /bin/node_exporter --web.disable-exporter-metrics --collector.systemd "--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service" --no-collector.dmi --no-collector.entropy --no-collector.thermal_zone --no-collector.time --no-collector.timex --no-collector.uname --no-collector.stat --no-collector.hwmon --no-collector.os --no-collector.selinux --no-collector.textfile --no-collector.powersupplyclass --no-collector.pressure --no-collector.rapl --path.rootfs=/rootfs

Feb 23 09:29:13 np0005626466.localdomain systemd[1]: Started libcrun container.

● libpod-conmon-8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c.scope
     Loaded: loaded (/run/systemd/transient/libpod-conmon-8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-23 09:35:34 UTC; 35min ago
         IO: 0B read, 0B written
      Tasks: 1 (limit: 100220)
     Memory: 312.0K
        CPU: 13ms
     CGroup: /machine.slice/libpod-conmon-8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c.scope
             └─264099 /usr/bin/conmon --api-version 1 -c 8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c -u 8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c/userdata -p /run/containers/storage/overlay-containers/8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c/userdata/pidfile -n neutron-dnsmasq-qdhcp-91fdc6a7-b901-4255-83f7-4b37365658a3 --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c/userdata/oci-log --conmon-pidfile /run/containers/storage/overlay-containers/8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c/userdata/conmon.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c

Feb 23 09:35:34 np0005626466.localdomain systemd[1]: Started libpod-conmon-8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c.scope.

● libpod-conmon-a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41.scope
     Loaded: loaded (/run/systemd/transient/libpod-conmon-a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-23 09:35:34 UTC; 35min ago
         IO: 0B read, 0B written
      Tasks: 1 (limit: 100220)
     Memory: 296.0K
        CPU: 14ms
     CGroup: /machine.slice/libpod-conmon-a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41.scope
             └─264095 /usr/bin/conmon --api-version 1 -c a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41 -u a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41/userdata -p /run/containers/storage/overlay-containers/a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41/userdata/pidfile -n neutron-dnsmasq-qdhcp-9da5b53d-3184-450f-9a5b-bdba1a6c9f6d --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41/userdata/oci-log --conmon-pidfile /run/containers/storage/overlay-containers/a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41/userdata/conmon.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41

Feb 23 09:35:34 np0005626466.localdomain systemd[1]: Started libpod-conmon-a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41.scope.

● libpod-fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-23 09:28:53 UTC; 42min ago
         IO: 920.0K read, 232.0K written
      Tasks: 7 (limit: 100220)
     Memory: 105.1M
        CPU: 2.327s
     CGroup: /machine.slice/libpod-fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9.scope
             └─container
               ├─236491 dumb-init --single-child -- kolla_start
               ├─236494 "ceilometer-polling: master process [/usr/bin/ceilometer-polling --polling-namespaces compute --logfile /dev/stdout]"
               └─236558 "ceilometer-polling: AgentManager worker(0)"

Feb 23 09:28:53 np0005626466.localdomain systemd[1]: Started libcrun container.
Feb 23 09:28:53 np0005626466.localdomain sudo[236495]: ceilometer : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_set_configs
Feb 23 09:28:53 np0005626466.localdomain sudo[236495]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Feb 23 09:28:53 np0005626466.localdomain sudo[236495]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=42405)
Feb 23 09:28:53 np0005626466.localdomain sudo[236495]: pam_unix(sudo:session): session closed for user root
Feb 23 09:28:53 np0005626466.localdomain sudo[236517]: ceilometer : PWD=/ ; USER=root ; COMMAND=/usr/local/bin/kolla_copy_cacerts
Feb 23 09:28:53 np0005626466.localdomain sudo[236517]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
Feb 23 09:28:53 np0005626466.localdomain sudo[236517]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=42405)
Feb 23 09:28:53 np0005626466.localdomain sudo[236517]: pam_unix(sudo:session): session closed for user root

● session-1.scope - Session 1 of User zuul
     Loaded: loaded (/run/systemd/transient/session-1.scope; transient)
  Transient: yes
     Active: active (abandoned) since Mon 2026-02-23 06:36:32 UTC; 3h 34min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 25.0M
        CPU: 2min 20.643s
     CGroup: /user.slice/user-1000.slice/session-1.scope
             └─4336 /usr/bin/python3

Feb 23 06:38:28 np0005626466.novalocal sudo[5750]: pam_unix(sudo:session): session closed for user root
Feb 23 06:38:39 np0005626466.novalocal python3[5772]: ansible-ansible.legacy.command Invoked with executable=/bin/bash _raw_params=env
                                                       _uses_shell=True zuul_log_id=fa163ef9-e89a-16c2-0802-000000000024-1-overcloudnovacompute2 zuul_ansible_split_streams=False warn=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None creates=None removes=None stdin=None
Feb 23 06:38:41 np0005626466.novalocal python3[5790]: ansible-file Invoked with path=/home/zuul/workspace state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Feb 23 06:39:00 np0005626466.novalocal sudo[5806]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c echo BECOME-SUCCESS-rudaulkoxfibvpupgntulhnariyblklc ; /usr/bin/python3
Feb 23 06:39:00 np0005626466.novalocal sudo[5806]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=1000)
Feb 23 06:39:00 np0005626466.novalocal python3[5808]: ansible-ansible.builtin.file Invoked with path=/etc/ci/env state=directory mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Feb 23 06:39:00 np0005626466.novalocal sudo[5806]: pam_unix(sudo:session): session closed for user root
Feb 23 06:40:00 np0005626466.novalocal sshd[4187]: Received disconnect from 38.102.83.114 port 59782:11: disconnected by user
Feb 23 06:40:00 np0005626466.novalocal sshd[4187]: Disconnected from user zuul 38.102.83.114 port 59782
Feb 23 06:40:00 np0005626466.novalocal sshd[4174]: pam_unix(sshd:session): session closed for user zuul

● session-71.scope - Session 71 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-71.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-23 09:51:05 UTC; 19min ago
         IO: 1.6M read, 3.0M written
      Tasks: 2
     Memory: 2.7M
        CPU: 26.554s
     CGroup: /user.slice/user-1002.slice/session-71.scope
             ├─303883 "sshd: ceph-admin [priv]"
             └─303886 "sshd: ceph-admin@notty"

Feb 23 10:10:40 np0005626466.localdomain podman[333879]: 2026-02-23 10:10:40.082579696 +0000 UTC m=+0.095411934 container create 7e66abf3937775ffe686996113c815576940c8beda8502961d82d4fd25f7de64 (image=registry.redhat.io/rhceph/rhceph-7-rhel9:latest, name=pensive_robinson, io.openshift.expose-services=, io.openshift.tags=rhceph ceph, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, org.opencontainers.image.revision=b3986a21dbd047e1edac0f24f7c0e811518e5b14, summary=Provides the latest Red Hat Ceph Storage 7 on RHEL 9 in a fully featured and supported base image., GIT_REPO=https://github.com/ceph/ceph-container.git, io.k8s.display-name=Red Hat Ceph Storage 7 on RHEL 9, version=7, name=rhceph, ceph=True, release=1770267347, org.opencontainers.image.created=2026-02-09T10:25:24Z, distribution-scope=public, maintainer=Guillaume Abrioux <gabrioux@redhat.com>, CEPH_POINT_RELEASE=, vendor=Red Hat, Inc., GIT_CLEAN=True, cpe=cpe:/a:redhat:enterprise_linux:9::appstream, architecture=x86_64, GIT_BRANCH=main, description=Red Hat Ceph Storage 7, build-date=2026-02-09T10:25:24Z, io.k8s.description=Red Hat Ceph Storage 7, RELEASE=main, com.redhat.component=rhceph-container, vcs-type=git, GIT_COMMIT=12717c0777377369ea674892da98b0d85250f5b0, url=https://catalog.redhat.com/en/search?searchType=containers, io.buildah.version=1.42.2, vcs-ref=b3986a21dbd047e1edac0f24f7c0e811518e5b14)
Feb 23 10:10:40 np0005626466.localdomain podman[333879]: 2026-02-23 10:10:40.053540795 +0000 UTC m=+0.066373093 image pull  registry.redhat.io/rhceph/rhceph-7-rhel9:latest
Feb 23 10:10:40 np0005626466.localdomain podman[333879]: 2026-02-23 10:10:40.157852304 +0000 UTC m=+0.170684572 container init 7e66abf3937775ffe686996113c815576940c8beda8502961d82d4fd25f7de64 (image=registry.redhat.io/rhceph/rhceph-7-rhel9:latest, name=pensive_robinson, vcs-type=git, CEPH_POINT_RELEASE=, io.k8s.display-name=Red Hat Ceph Storage 7 on RHEL 9, GIT_BRANCH=main, version=7, org.opencontainers.image.created=2026-02-09T10:25:24Z, org.opencontainers.image.revision=b3986a21dbd047e1edac0f24f7c0e811518e5b14, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.component=rhceph-container, vendor=Red Hat, Inc., io.openshift.tags=rhceph ceph, description=Red Hat Ceph Storage 7, io.buildah.version=1.42.2, io.k8s.description=Red Hat Ceph Storage 7, ceph=True, vcs-ref=b3986a21dbd047e1edac0f24f7c0e811518e5b14, maintainer=Guillaume Abrioux <gabrioux@redhat.com>, cpe=cpe:/a:redhat:enterprise_linux:9::appstream, RELEASE=main, architecture=x86_64, summary=Provides the latest Red Hat Ceph Storage 7 on RHEL 9 in a fully featured and supported base image., release=1770267347, GIT_COMMIT=12717c0777377369ea674892da98b0d85250f5b0, io.openshift.expose-services=, distribution-scope=public, build-date=2026-02-09T10:25:24Z, url=https://catalog.redhat.com/en/search?searchType=containers, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, GIT_CLEAN=True, name=rhceph)
Feb 23 10:10:40 np0005626466.localdomain podman[333879]: 2026-02-23 10:10:40.166506923 +0000 UTC m=+0.179339191 container start 7e66abf3937775ffe686996113c815576940c8beda8502961d82d4fd25f7de64 (image=registry.redhat.io/rhceph/rhceph-7-rhel9:latest, name=pensive_robinson, org.opencontainers.image.revision=b3986a21dbd047e1edac0f24f7c0e811518e5b14, distribution-scope=public, release=1770267347, version=7, ceph=True, com.redhat.component=rhceph-container, io.buildah.version=1.42.2, summary=Provides the latest Red Hat Ceph Storage 7 on RHEL 9 in a fully featured and supported base image., GIT_REPO=https://github.com/ceph/ceph-container.git, url=https://catalog.redhat.com/en/search?searchType=containers, vcs-type=git, cpe=cpe:/a:redhat:enterprise_linux:9::appstream, io.openshift.tags=rhceph ceph, description=Red Hat Ceph Storage 7, CEPH_POINT_RELEASE=, vendor=Red Hat, Inc., GIT_COMMIT=12717c0777377369ea674892da98b0d85250f5b0, io.openshift.expose-services=, io.k8s.display-name=Red Hat Ceph Storage 7 on RHEL 9, org.opencontainers.image.created=2026-02-09T10:25:24Z, maintainer=Guillaume Abrioux <gabrioux@redhat.com>, io.k8s.description=Red Hat Ceph Storage 7, name=rhceph, architecture=x86_64, vcs-ref=b3986a21dbd047e1edac0f24f7c0e811518e5b14, RELEASE=main, GIT_BRANCH=main, GIT_CLEAN=True, build-date=2026-02-09T10:25:24Z, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI)
Feb 23 10:10:40 np0005626466.localdomain podman[333879]: 2026-02-23 10:10:40.166842423 +0000 UTC m=+0.179674681 container attach 7e66abf3937775ffe686996113c815576940c8beda8502961d82d4fd25f7de64 (image=registry.redhat.io/rhceph/rhceph-7-rhel9:latest, name=pensive_robinson, org.opencontainers.image.created=2026-02-09T10:25:24Z, CEPH_POINT_RELEASE=, GIT_COMMIT=12717c0777377369ea674892da98b0d85250f5b0, summary=Provides the latest Red Hat Ceph Storage 7 on RHEL 9 in a fully featured and supported base image., url=https://catalog.redhat.com/en/search?searchType=containers, vendor=Red Hat, Inc., description=Red Hat Ceph Storage 7, io.buildah.version=1.42.2, io.openshift.tags=rhceph ceph, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, io.k8s.description=Red Hat Ceph Storage 7, RELEASE=main, architecture=x86_64, GIT_CLEAN=True, name=rhceph, cpe=cpe:/a:redhat:enterprise_linux:9::appstream, build-date=2026-02-09T10:25:24Z, vcs-ref=b3986a21dbd047e1edac0f24f7c0e811518e5b14, maintainer=Guillaume Abrioux <gabrioux@redhat.com>, version=7, vcs-type=git, release=1770267347, GIT_REPO=https://github.com/ceph/ceph-container.git, org.opencontainers.image.revision=b3986a21dbd047e1edac0f24f7c0e811518e5b14, com.redhat.component=rhceph-container, io.openshift.expose-services=, io.k8s.display-name=Red Hat Ceph Storage 7 on RHEL 9, ceph=True, GIT_BRANCH=main)
Feb 23 10:10:41 np0005626466.localdomain podman[333879]: 2026-02-23 10:10:41.07274878 +0000 UTC m=+1.085581008 container died 7e66abf3937775ffe686996113c815576940c8beda8502961d82d4fd25f7de64 (image=registry.redhat.io/rhceph/rhceph-7-rhel9:latest, name=pensive_robinson, release=1770267347, com.redhat.component=rhceph-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.42.2, maintainer=Guillaume Abrioux <gabrioux@redhat.com>, build-date=2026-02-09T10:25:24Z, architecture=x86_64, GIT_COMMIT=12717c0777377369ea674892da98b0d85250f5b0, io.openshift.expose-services=, GIT_BRANCH=main, version=7, cpe=cpe:/a:redhat:enterprise_linux:9::appstream, url=https://catalog.redhat.com/en/search?searchType=containers, io.k8s.display-name=Red Hat Ceph Storage 7 on RHEL 9, distribution-scope=public, CEPH_POINT_RELEASE=, io.openshift.tags=rhceph ceph, org.opencontainers.image.revision=b3986a21dbd047e1edac0f24f7c0e811518e5b14, description=Red Hat Ceph Storage 7, org.opencontainers.image.created=2026-02-09T10:25:24Z, name=rhceph, RELEASE=main, GIT_CLEAN=True, GIT_REPO=https://github.com/ceph/ceph-container.git, vcs-type=git, ceph=True, summary=Provides the latest Red Hat Ceph Storage 7 on RHEL 9 in a fully featured and supported base image., io.k8s.description=Red Hat Ceph Storage 7, vcs-ref=b3986a21dbd047e1edac0f24f7c0e811518e5b14, vendor=Red Hat, Inc.)
Feb 23 10:10:41 np0005626466.localdomain sudo[333626]: pam_unix(sudo:session): session closed for user root
Feb 23 10:10:41 np0005626466.localdomain sudo[335960]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Feb 23 10:10:41 np0005626466.localdomain sudo[335960]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=1002)
Feb 23 10:10:41 np0005626466.localdomain sudo[335960]: pam_unix(sudo:session): session closed for user root

● session-84.scope - Session 84 of User zuul
     Loaded: loaded (/run/systemd/transient/session-84.scope; transient)
  Transient: yes
     Active: active (running) since Mon 2026-02-23 10:10:20 UTC; 42s ago
         IO: 125.8M read, 60.0M written
      Tasks: 16
     Memory: 584.3M
        CPU: 2min 1.440s
     CGroup: /user.slice/user-1000.slice/session-84.scope
             ├─331435 "sshd: zuul [priv]"
             ├─331438 "sshd: zuul@notty"
             ├─331439 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             ├─331456 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             ├─340111 timeout 300s systemctl status --all
             ├─340118 systemctl status --all
             ├─340176 timeout 300s xfs_admin -l -u /dev/vda4
             ├─340177 /usr/bin/sh -f /usr/sbin/xfs_admin -l -u /dev/vda4
             ├─340178 xfs_db -x -p xfs_admin -r -c label -r -c uuid /dev/vda4
             ├─340186 timeout --foreground 300s virsh -r maxvcpus kvm
             └─340187 virsh -r maxvcpus kvm

Feb 23 10:10:20 np0005626466.localdomain systemd[1]: Started Session 84 of User zuul.
Feb 23 10:10:20 np0005626466.localdomain sudo[331439]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt
Feb 23 10:10:20 np0005626466.localdomain sudo[331439]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=1000)
Feb 23 10:10:26 np0005626466.localdomain ovs-vsctl[331754]: ovs|00001|db_ctl_base|ERR|no key "dpdk-init" in Open_vSwitch record "." column other_config
Feb 23 10:10:50 np0005626466.localdomain ovs-appctl[337647]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

○ 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4.service - /usr/bin/podman healthcheck run 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4
     Loaded: loaded (/run/systemd/transient/122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4.service; transient)
  Transient: yes
     Active: inactive (dead) since Mon 2026-02-23 10:10:56 UTC; 6s ago
   Duration: 241ms
TriggeredBy: ● 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4.timer
    Process: 339081 ExecStart=/usr/bin/podman healthcheck run 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4 (code=exited, status=0/SUCCESS)
   Main PID: 339081 (code=exited, status=0/SUCCESS)
        CPU: 117ms

Feb 23 10:10:56 np0005626466.localdomain systemd[1]: Started /usr/bin/podman healthcheck run 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4.
Feb 23 10:10:56 np0005626466.localdomain podman[339081]: 2026-02-23 10:10:56.34962379 +0000 UTC m=+0.084003251 container health_status 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, org.label-schema.build-date=20260216, org.label-schema.license=GPLv2, managed_by=edpm_ansible, container_name=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, io.buildah.version=1.43.0, org.label-schema.schema-version=1.0, tcib_build_tag=8419493e1fd846703d277695e03fc5eb, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': 'a07a4436e1d1ca1c6231f11309616a78b1ad9830450b5c2d2fc3fb113cfbf838'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, tcib_managed=true)
Feb 23 10:10:56 np0005626466.localdomain podman[339081]: 2026-02-23 10:10:56.48066062 +0000 UTC m=+0.215040151 container exec_died 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, org.label-schema.vendor=CentOS, tcib_build_tag=8419493e1fd846703d277695e03fc5eb, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': 'a07a4436e1d1ca1c6231f11309616a78b1ad9830450b5c2d2fc3fb113cfbf838'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, config_id=ovn_controller, container_name=ovn_controller, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, managed_by=edpm_ansible, org.label-schema.build-date=20260216, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.43.0, maintainer=OpenStack Kubernetes Operator team)
Feb 23 10:10:56 np0005626466.localdomain systemd[1]: 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4.service: Deactivated successfully.

○ 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e.service - /usr/bin/podman healthcheck run 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e
     Loaded: loaded (/run/systemd/transient/6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e.service; transient)
  Transient: yes
     Active: inactive (dead) since Mon 2026-02-23 10:10:55 UTC; 7s ago
   Duration: 136ms
TriggeredBy: ● 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e.timer
    Process: 338886 ExecStart=/usr/bin/podman healthcheck run 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e (code=exited, status=0/SUCCESS)
   Main PID: 338886 (code=exited, status=0/SUCCESS)
        CPU: 123ms

Feb 23 10:10:55 np0005626466.localdomain systemd[1]: Started /usr/bin/podman healthcheck run 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e.
Feb 23 10:10:55 np0005626466.localdomain podman[338886]: 2026-02-23 10:10:55.349054273 +0000 UTC m=+0.078894702 container health_status 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e (image=quay.io/openstack-k8s-operators/openstack-network-exporter@sha256:156f95f51d0a91422548c574e96ee37f07a200c948e173b22523982f24f1e79c, name=openstack_network_exporter, health_status=healthy, vcs-type=git, config_data={'command': [], 'environment': {'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml', 'OS_ENDPOINT_TYPE': 'internal', 'EDPM_CONFIG_HASH': 'fc7962317d10396c157604cb31f0de59b77d21ca5133156f39a4831f188d031c'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter', 'test': '/openstack/healthcheck openstack-netwo'}, 'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter@sha256:156f95f51d0a91422548c574e96ee37f07a200c948e173b22523982f24f1e79c', 'net': 'host', 'ports': ['9105:9105'], 'privileged': True, 'recreate': True, 'restart': 'always', 'volumes': ['/var/lib/openstack/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, url=https://catalog.redhat.com/en/search?searchType=containers, managed_by=edpm_ansible, org.opencontainers.image.created=2026-02-05T04:57:10Z, org.opencontainers.image.revision=21849199b7179dc3074812b8e24698ec609d6a5c, description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., distribution-scope=public, summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., container_name=openstack_network_exporter, cpe=cpe:/a:redhat:enterprise_linux:9::appstream, io.buildah.version=1.33.7, io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=minimal rhel9, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, version=9.7, build-date=2026-02-05T04:57:10Z, release=1770267347, architecture=x86_64, config_id=openstack_network_exporter, vcs-ref=21849199b7179dc3074812b8e24698ec609d6a5c, vendor=Red Hat, Inc., io.openshift.expose-services=, com.redhat.component=ubi9-minimal-container, maintainer=Red Hat, Inc., name=ubi9/ubi-minimal)
Feb 23 10:10:55 np0005626466.localdomain podman[338886]: 2026-02-23 10:10:55.368234818 +0000 UTC m=+0.098075217 container exec_died 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e (image=quay.io/openstack-k8s-operators/openstack-network-exporter@sha256:156f95f51d0a91422548c574e96ee37f07a200c948e173b22523982f24f1e79c, name=openstack_network_exporter, managed_by=edpm_ansible, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vendor=Red Hat, Inc., description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., org.opencontainers.image.created=2026-02-05T04:57:10Z, version=9.7, build-date=2026-02-05T04:57:10Z, config_data={'command': [], 'environment': {'OPENSTACK_NETWORK_EXPORTER_YAML': '/etc/openstack_network_exporter/openstack_network_exporter.yaml', 'OS_ENDPOINT_TYPE': 'internal', 'EDPM_CONFIG_HASH': 'fc7962317d10396c157604cb31f0de59b77d21ca5133156f39a4831f188d031c'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/openstack_network_exporter', 'test': '/openstack/healthcheck openstack-netwo'}, 'image': 'quay.io/openstack-k8s-operators/openstack-network-exporter@sha256:156f95f51d0a91422548c574e96ee37f07a200c948e173b22523982f24f1e79c', 'net': 'host', 'ports': ['9105:9105'], 'privileged': True, 'recreate': True, 'restart': 'always', 'volumes': ['/var/lib/openstack/telemetry/openstack_network_exporter.yaml:/etc/openstack_network_exporter/openstack_network_exporter.yaml:z', '/var/run/openvswitch:/run/openvswitch:rw,z', '/var/lib/openvswitch/ovn:/run/ovn:rw,z', '/proc:/host/proc:ro', '/var/lib/openstack/healthchecks/openstack_network_exporter:/openstack:ro,z']}, url=https://catalog.redhat.com/en/search?searchType=containers, name=ubi9/ubi-minimal, 
config_id=openstack_network_exporter, io.openshift.expose-services=, cpe=cpe:/a:redhat:enterprise_linux:9::appstream, release=1770267347, container_name=openstack_network_exporter, vcs-type=git, org.opencontainers.image.revision=21849199b7179dc3074812b8e24698ec609d6a5c, io.openshift.tags=minimal rhel9, maintainer=Red Hat, Inc., summary=Provides the latest release of the minimal Red Hat Universal Base Image 9., io.k8s.description=The Universal Base Image Minimal is a stripped down image that uses microdnf as a package manager. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.buildah.version=1.33.7, vcs-ref=21849199b7179dc3074812b8e24698ec609d6a5c, distribution-scope=public, io.k8s.display-name=Red Hat Universal Base Image 9 Minimal, com.redhat.component=ubi9-minimal-container, architecture=x86_64)
Feb 23 10:10:55 np0005626466.localdomain systemd[1]: 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e.service: Deactivated successfully.

○ 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e.service - /usr/bin/podman healthcheck run 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e
     Loaded: loaded (/run/systemd/transient/8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e.service; transient)
  Transient: yes
     Active: inactive (dead) since Mon 2026-02-23 10:11:02 UTC; 344ms ago
   Duration: 156ms
TriggeredBy: ● 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e.timer
    Process: 340121 ExecStart=/usr/bin/podman healthcheck run 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e (code=exited, status=0/SUCCESS)
   Main PID: 340121 (code=exited, status=0/SUCCESS)
        CPU: 177ms

Feb 23 10:11:02 np0005626466.localdomain systemd[1]: Started /usr/bin/podman healthcheck run 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e.
Unit apparmor.service could not be found.
Feb 23 10:11:02 np0005626466.localdomain podman[340121]: 2026-02-23 10:11:02.876554167 +0000 UTC m=+0.109551703 container health_status 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, managed_by=edpm_ansible, org.label-schema.license=GPLv2, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, io.buildah.version=1.43.0, org.label-schema.schema-version=1.0, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=8419493e1fd846703d277695e03fc5eb, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': 'a07a4436e1d1ca1c6231f11309616a78b1ad9830450b5c2d2fc3fb113cfbf838-df122b180261157f1de1391083b3d8abac306e2f12893ac7b9291feafc874311'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/openstack/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260216)
Feb 23 10:11:02 np0005626466.localdomain podman[340121]: 2026-02-23 10:11:02.889159968 +0000 UTC m=+0.122157544 container exec_died 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, tcib_managed=true, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, io.buildah.version=1.43.0, org.label-schema.build-date=20260216, org.label-schema.license=GPLv2, tcib_build_tag=8419493e1fd846703d277695e03fc5eb, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': 'a07a4436e1d1ca1c6231f11309616a78b1ad9830450b5c2d2fc3fb113cfbf838-df122b180261157f1de1391083b3d8abac306e2f12893ac7b9291feafc874311'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/openstack/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Feb 23 10:11:02 np0005626466.localdomain systemd[1]: 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e.service: Deactivated successfully.

Unit auto-cpufreq.service could not be found.
Unit autofs.service could not be found.
● auditd.service - Security Auditing Service
     Loaded: loaded (/usr/lib/systemd/system/auditd.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
       Docs: man:auditd(8)
             https://github.com/linux-audit/audit-documentation
   Main PID: 726 (auditd)
         IO: 2.5M read, 52.3M written
      Tasks: 4 (limit: 100220)
     Memory: 29.7M
        CPU: 9.767s
     CGroup: /system.slice/auditd.service
             ├─726 /sbin/auditd
             └─728 /usr/sbin/sedispatch

Feb 23 06:36:04 localhost augenrules[741]: pid 726
Feb 23 06:36:04 localhost augenrules[741]: rate_limit 0
Feb 23 06:36:04 localhost augenrules[741]: backlog_limit 8192
Feb 23 06:36:04 localhost augenrules[741]: lost 0
Feb 23 06:36:04 localhost augenrules[741]: backlog 2
Feb 23 06:36:04 localhost augenrules[741]: backlog_wait_time 60000
Feb 23 06:36:04 localhost augenrules[741]: backlog_wait_time_actual 0
Feb 23 06:36:04 localhost systemd[1]: Started Security Auditing Service.
Feb 23 09:13:40 np0005626466.localdomain auditd[726]: Audit daemon rotating log files
Feb 23 09:32:18 np0005626466.localdomain auditd[726]: Audit daemon rotating log files

○ auth-rpcgss-module.service - Kernel Module supporting RPCSEC_GSS
     Loaded: loaded (/usr/lib/systemd/system/auth-rpcgss-module.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-23 06:36:02 UTC; 3h 35min ago

○ blk-availability.service - Availability of block devices
     Loaded: loaded (/usr/lib/systemd/system/blk-availability.service; disabled; preset: disabled)
     Active: inactive (dead)

○ c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8.service - /usr/bin/podman healthcheck run c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8
     Loaded: loaded (/run/systemd/transient/c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8.service; transient)
  Transient: yes
     Active: inactive (dead) since Mon 2026-02-23 10:10:44 UTC; 18s ago
   Duration: 215ms
TriggeredBy: ● c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8.timer
    Process: 336400 ExecStart=/usr/bin/podman healthcheck run c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8 (code=exited, status=0/SUCCESS)
   Main PID: 336400 (code=exited, status=0/SUCCESS)
        CPU: 114ms

Feb 23 10:10:44 np0005626466.localdomain systemd[1]: Started /usr/bin/podman healthcheck run c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8.
Feb 23 10:10:44 np0005626466.localdomain podman[336400]: 2026-02-23 10:10:44.831853792 +0000 UTC m=+0.181401864 container health_status c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8 (image=quay.io/navidys/prometheus-podman-exporter@sha256:d339ba049bbd1adccb795962bf163f5b22fd84dea865d88b9eb525e46247d6bd, name=podman_exporter, health_status=healthy, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'environment': {'CONTAINER_HOST': 'unix:///run/podman/podman.sock', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/podman_exporter', 'test': '/openstack/healthcheck podman_exporter'}, 'image': 'quay.io/navidys/prometheus-podman-exporter@sha256:d339ba049bbd1adccb795962bf163f5b22fd84dea865d88b9eb525e46247d6bd', 'net': 'host', 'ports': ['9882:9882'], 'privileged': True, 'recreate': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']}, config_id=podman_exporter, container_name=podman_exporter)
Feb 23 10:10:44 np0005626466.localdomain podman[336400]: 2026-02-23 10:10:44.842944907 +0000 UTC m=+0.192492979 container exec_died c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8 (image=quay.io/navidys/prometheus-podman-exporter@sha256:d339ba049bbd1adccb795962bf163f5b22fd84dea865d88b9eb525e46247d6bd, name=podman_exporter, config_id=podman_exporter, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'environment': {'CONTAINER_HOST': 'unix:///run/podman/podman.sock', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/podman_exporter', 'test': '/openstack/healthcheck podman_exporter'}, 'image': 'quay.io/navidys/prometheus-podman-exporter@sha256:d339ba049bbd1adccb795962bf163f5b22fd84dea865d88b9eb525e46247d6bd', 'net': 'host', 'ports': ['9882:9882'], 'privileged': True, 'recreate': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Feb 23 10:10:44 np0005626466.localdomain systemd[1]: c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8.service: Deactivated successfully.

○ cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52.service - /usr/bin/podman healthcheck run cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52
     Loaded: loaded (/run/systemd/transient/cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52.service; transient)
  Transient: yes
     Active: inactive (dead) since Mon 2026-02-23 10:10:44 UTC; 18s ago
   Duration: 104ms
TriggeredBy: ● cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52.timer
    Process: 336401 ExecStart=/usr/bin/podman healthcheck run cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52 (code=exited, status=0/SUCCESS)
   Main PID: 336401 (code=exited, status=0/SUCCESS)
        CPU: 110ms

Feb 23 10:10:44 np0005626466.localdomain systemd[1]: Started /usr/bin/podman healthcheck run cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52.
Feb 23 10:10:44 np0005626466.localdomain podman[336401]: 2026-02-23 10:10:44.730193525 +0000 UTC m=+0.074527716 container health_status cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52 (image=quay.io/prometheus/node-exporter@sha256:39c642b2b337e38c18e80266fb14383754178202f40103646337722a594d984c, name=node_exporter, health_status=healthy, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'command': ['--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl', '--path.rootfs=/rootfs'], 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/node_exporter', 'test': '/openstack/healthcheck node_exporter'}, 'image': 'quay.io/prometheus/node-exporter@sha256:39c642b2b337e38c18e80266fb14383754178202f40103646337722a594d984c', 'net': 'host', 'ports': ['9100:9100'], 'privileged': True, 'recreate': True, 'restart': 'always', 'user': 'root', 'volumes': ['/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/:/rootfs:ro', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=node_exporter)
Feb 23 10:10:44 np0005626466.localdomain podman[336401]: 2026-02-23 10:10:44.741066723 +0000 UTC m=+0.085400884 container exec_died cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52 (image=quay.io/prometheus/node-exporter@sha256:39c642b2b337e38c18e80266fb14383754178202f40103646337722a594d984c, name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible, config_data={'command': ['--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl', '--path.rootfs=/rootfs'], 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/node_exporter', 'test': '/openstack/healthcheck node_exporter'}, 'image': 'quay.io/prometheus/node-exporter@sha256:39c642b2b337e38c18e80266fb14383754178202f40103646337722a594d984c', 'net': 'host', 'ports': ['9100:9100'], 'privileged': True, 'recreate': True, 'restart': 'always', 'user': 'root', 'volumes': ['/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/:/rootfs:ro', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=node_exporter, container_name=node_exporter)
Feb 23 10:10:44 np0005626466.localdomain systemd[1]: cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52.service: Deactivated successfully.

● ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@crash.np0005626466.service - Ceph crash.np0005626466 for f1fea371-cb69-578d-a3d0-b5c472a84b46
     Loaded: loaded (/etc/systemd/system/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 07:38:41 UTC; 2h 32min ago
   Main PID: 29136 (conmon)
         IO: 0B read, 408.0K written
      Tasks: 3 (limit: 100220)
     Memory: 8.3M
        CPU: 3.659s
     CGroup: /system.slice/system-ceph\x2df1fea371\x2dcb69\x2d578d\x2da3d0\x2db5c472a84b46.slice/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@crash.np0005626466.service
             ├─libpod-payload-975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e
             │ ├─29138 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.np0005626466
             │ └─29140 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.np0005626466
             └─runtime
               └─29136 /usr/bin/conmon --api-version 1 -c 975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e -u 975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e/userdata -p /run/containers/storage/overlay-containers/975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-crash-np0005626466 --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@crash.np0005626466.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e

Feb 23 07:38:41 np0005626466.localdomain ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-crash-np0005626466[29136]:     pools:   0 pools, 0 pgs
Feb 23 07:38:41 np0005626466.localdomain ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-crash-np0005626466[29136]:     objects: 0 objects, 0 B
Feb 23 07:38:41 np0005626466.localdomain ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-crash-np0005626466[29136]:     usage:   0 B used, 0 B / 0 B avail
Feb 23 07:38:41 np0005626466.localdomain ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-crash-np0005626466[29136]:     pgs:     
Feb 23 07:38:41 np0005626466.localdomain ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-crash-np0005626466[29136]:  
Feb 23 07:38:41 np0005626466.localdomain ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-crash-np0005626466[29136]:   progress:
Feb 23 07:38:41 np0005626466.localdomain ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-crash-np0005626466[29136]:     Updating crash deployment (+4 -> 6) (4s)
Feb 23 07:38:41 np0005626466.localdomain ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-crash-np0005626466[29136]:       [==============..............] (remaining: 4s)
Feb 23 07:38:41 np0005626466.localdomain ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-crash-np0005626466[29136]:  
Feb 23 07:38:41 np0005626466.localdomain ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-crash-np0005626466[29136]: INFO:ceph-crash:monitoring path /var/lib/ceph/crash, delay 600s

● ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mds.mds.np0005626466.vaywlp.service - Ceph mds.mds.np0005626466.vaywlp for f1fea371-cb69-578d-a3d0-b5c472a84b46
     Loaded: loaded (/etc/systemd/system/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 09:42:45 UTC; 28min ago
   Main PID: 285093 (conmon)
         IO: 0B read, 85.0K written
      Tasks: 18 (limit: 100220)
     Memory: 25.0M
        CPU: 1.939s
     CGroup: /system.slice/system-ceph\x2df1fea371\x2dcb69\x2d578d\x2da3d0\x2db5c472a84b46.slice/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mds.mds.np0005626466.vaywlp.service
             ├─libpod-payload-13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf
             │ ├─285095 /run/podman-init -- /usr/bin/ceph-mds -n mds.mds.np0005626466.vaywlp -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─285097 /usr/bin/ceph-mds -n mds.mds.np0005626466.vaywlp -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─285093 /usr/bin/conmon --api-version 1 -c 13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf -u 13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf/userdata -p /run/containers/storage/overlay-containers/13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-mds-mds-np0005626466-vaywlp --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mds.mds.np0005626466.vaywlp.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf

Feb 23 10:10:29 np0005626466.localdomain ceph-mds[285097]: mds.mds.np0005626466.vaywlp Can't run that command on an inactive MDS!
Feb 23 10:10:29 np0005626466.localdomain ceph-mds[285097]: mds.mds.np0005626466.vaywlp asok_command: dump_ops_in_flight {prefix=dump_ops_in_flight} (starting...)
Feb 23 10:10:29 np0005626466.localdomain ceph-mds[285097]: mds.mds.np0005626466.vaywlp Can't run that command on an inactive MDS!
Feb 23 10:10:29 np0005626466.localdomain ceph-mds[285097]: mds.mds.np0005626466.vaywlp asok_command: get subtrees {prefix=get subtrees} (starting...)
Feb 23 10:10:29 np0005626466.localdomain ceph-mds[285097]: mds.mds.np0005626466.vaywlp Can't run that command on an inactive MDS!
Feb 23 10:10:30 np0005626466.localdomain ceph-mds[285097]: mds.mds.np0005626466.vaywlp asok_command: ops {prefix=ops} (starting...)
Feb 23 10:10:30 np0005626466.localdomain ceph-mds[285097]: mds.mds.np0005626466.vaywlp Can't run that command on an inactive MDS!
Feb 23 10:10:30 np0005626466.localdomain ceph-mds[285097]: mds.mds.np0005626466.vaywlp asok_command: session ls {prefix=session ls} (starting...)
Feb 23 10:10:30 np0005626466.localdomain ceph-mds[285097]: mds.mds.np0005626466.vaywlp Can't run that command on an inactive MDS!
Feb 23 10:10:30 np0005626466.localdomain ceph-mds[285097]: mds.mds.np0005626466.vaywlp asok_command: status {prefix=status} (starting...)

● ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mgr.np0005626466.nisqfq.service - Ceph mgr.np0005626466.nisqfq for f1fea371-cb69-578d-a3d0-b5c472a84b46
     Loaded: loaded (/etc/systemd/system/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 09:44:17 UTC; 26min ago
   Main PID: 286338 (conmon)
         IO: 0B read, 89.4M written
      Tasks: 23 (limit: 100220)
     Memory: 431.4M
        CPU: 17.987s
     CGroup: /system.slice/system-ceph\x2df1fea371\x2dcb69\x2d578d\x2da3d0\x2db5c472a84b46.slice/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mgr.np0005626466.nisqfq.service
             ├─libpod-payload-9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf
             │ ├─286340 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.np0005626466.nisqfq -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─286342 /usr/bin/ceph-mgr -n mgr.np0005626466.nisqfq -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─286338 /usr/bin/conmon --api-version 1 -c 9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf -u 9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf/userdata -p /run/containers/storage/overlay-containers/9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-mgr-np0005626466-nisqfq --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mgr.np0005626466.nisqfq.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf

Feb 23 09:51:09 np0005626466.localdomain ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-mgr-np0005626466-nisqfq[286338]: 2026-02-23T09:51:09.751+0000 7faacb534140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member
Feb 23 09:51:09 np0005626466.localdomain ceph-mgr[286342]: mgr[py] Loading python module 'volumes'
Feb 23 09:51:09 np0005626466.localdomain ceph-mgr[286342]: mgr[py] Module volumes has missing NOTIFY_TYPES member
Feb 23 09:51:09 np0005626466.localdomain ceph-mgr[286342]: mgr[py] Loading python module 'zabbix'
Feb 23 09:51:09 np0005626466.localdomain ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-mgr-np0005626466-nisqfq[286338]: 2026-02-23T09:51:09.930+0000 7faacb534140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member
Feb 23 09:51:09 np0005626466.localdomain ceph-mgr[286342]: mgr[py] Module zabbix has missing NOTIFY_TYPES member
Feb 23 09:51:09 np0005626466.localdomain ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-mgr-np0005626466-nisqfq[286338]: 2026-02-23T09:51:09.989+0000 7faacb534140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member
Feb 23 09:51:09 np0005626466.localdomain ceph-mgr[286342]: ms_deliver_dispatch: unhandled message 0x5607f5edb1e0 mon_map magic: 0 from mon.2 v2:172.18.0.105:3300/0
Feb 23 09:51:10 np0005626466.localdomain ceph-mgr[286342]: client.0 ms_handle_reset on v2:172.18.0.107:6810/2356945423
Feb 23 10:06:11 np0005626466.localdomain ceph-mgr[286342]: client.0 ms_handle_reset on v2:172.18.0.107:6810/2356945423

● ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mon.np0005626466.service - Ceph mon.np0005626466 for f1fea371-cb69-578d-a3d0-b5c472a84b46
     Loaded: loaded (/etc/systemd/system/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 09:49:26 UTC; 21min ago
    Process: 300776 ExecStartPre=/bin/rm -f /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mon.np0005626466.service-pid /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mon.np0005626466.service-cid (code=exited, status=0/SUCCESS)
    Process: 300777 ExecStart=/bin/bash /var/lib/ceph/f1fea371-cb69-578d-a3d0-b5c472a84b46/mon.np0005626466/unit.run (code=exited, status=0/SUCCESS)
   Main PID: 300837 (conmon)
         IO: 140.0K read, 432.2M written
      Tasks: 27 (limit: 100220)
     Memory: 95.0M
        CPU: 23.574s
     CGroup: /system.slice/system-ceph\x2df1fea371\x2dcb69\x2d578d\x2da3d0\x2db5c472a84b46.slice/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mon.np0005626466.service
             ├─libpod-payload-2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a
             │ ├─300839 /run/podman-init -- /usr/bin/ceph-mon -n mon.np0005626466 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─300841 /usr/bin/ceph-mon -n mon.np0005626466 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             └─runtime
               └─300837 /usr/bin/conmon --api-version 1 -c 2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a -u 2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a/userdata -p /run/containers/storage/overlay-containers/2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-mon-np0005626466 --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mon.np0005626466.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a

Feb 23 10:11:02 np0005626466.localdomain ceph-mon[300841]: from='client.59770 -' entity='client.admin' cmd=[{"prefix": "osd pool autoscale-status", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Feb 23 10:11:02 np0005626466.localdomain ceph-mon[300841]: from='client.70106 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Feb 23 10:11:02 np0005626466.localdomain ceph-mon[300841]: pgmap v777: 177 pgs: 177 active+clean; 226 MiB data, 1.3 GiB used, 41 GiB / 42 GiB avail
Feb 23 10:11:02 np0005626466.localdomain ceph-mon[300841]: from='client.? 172.18.0.106:0/4043893167' entity='client.admin' cmd={"prefix": "osd pool ls", "detail": "detail", "format": "json-pretty"} : dispatch
Feb 23 10:11:02 np0005626466.localdomain ceph-mon[300841]: from='client.? 172.18.0.107:0/2148840963' entity='client.admin' cmd={"prefix": "osd pool ls", "detail": "detail", "format": "json-pretty"} : dispatch
Feb 23 10:11:02 np0005626466.localdomain ceph-mon[300841]: from='client.? 172.18.0.106:0/902802160' entity='client.admin' cmd={"prefix": "osd stat", "format": "json-pretty"} : dispatch
Feb 23 10:11:02 np0005626466.localdomain ceph-mon[300841]: from='client.? 172.18.0.107:0/3166253469' entity='client.admin' cmd={"prefix": "osd stat", "format": "json-pretty"} : dispatch
Feb 23 10:11:02 np0005626466.localdomain ceph-mon[300841]: from='client.? 172.18.0.108:0/785348751' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch
Feb 23 10:11:02 np0005626466.localdomain ceph-mon[300841]: mon.np0005626466@2(peon) e15 handle_command mon_command({"prefix": "time-sync-status", "format": "json-pretty"} v 0)
Feb 23 10:11:02 np0005626466.localdomain ceph-mon[300841]: log_channel(audit) log [DBG] : from='client.? 172.18.0.108:0/252287053' entity='client.admin' cmd={"prefix": "time-sync-status", "format": "json-pretty"} : dispatch

● ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@osd.1.service - Ceph osd.1 for f1fea371-cb69-578d-a3d0-b5c472a84b46
     Loaded: loaded (/etc/systemd/system/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 07:38:59 UTC; 2h 32min ago
   Main PID: 31869 (conmon)
         IO: 199.2M read, 4.8G written
      Tasks: 60 (limit: 100220)
     Memory: 996.6M
        CPU: 1min 24.690s
     CGroup: /system.slice/system-ceph\x2df1fea371\x2dcb69\x2d578d\x2da3d0\x2db5c472a84b46.slice/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@osd.1.service
             ├─libpod-payload-f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b
             │ ├─31871 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─31873 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─31869 /usr/bin/conmon --api-version 1 -c f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b -u f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b/userdata -p /run/containers/storage/overlay-containers/f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-osd-1 --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b

Feb 23 10:10:38 np0005626466.localdomain ceph-osd[31873]: do_command 'counter schema' '{prefix=counter schema}' result is 0 bytes
Feb 23 10:10:38 np0005626466.localdomain ceph-osd[31873]: monclient: tick
Feb 23 10:10:38 np0005626466.localdomain ceph-osd[31873]: monclient: _check_auth_tickets
Feb 23 10:10:38 np0005626466.localdomain ceph-osd[31873]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-02-23T10:10:06.614627+0000)
Feb 23 10:10:38 np0005626466.localdomain ceph-osd[31873]: prioritycache tune_memory target: 3561598361 mapped: 179904512 unmapped: 23945216 heap: 203849728 old mem: 2222052238 new mem: 2222052238
Feb 23 10:10:38 np0005626466.localdomain ceph-osd[31873]: monclient: tick
Feb 23 10:10:38 np0005626466.localdomain ceph-osd[31873]: monclient: _check_auth_tickets
Feb 23 10:10:38 np0005626466.localdomain ceph-osd[31873]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-02-23T10:10:07.614793+0000)
Feb 23 10:10:38 np0005626466.localdomain ceph-osd[31873]: prioritycache tune_memory target: 3561598361 mapped: 179978240 unmapped: 23871488 heap: 203849728 old mem: 2222052238 new mem: 2222052238
Feb 23 10:10:38 np0005626466.localdomain ceph-osd[31873]: do_command 'log dump' '{prefix=log dump}'

● ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@osd.4.service - Ceph osd.4 for f1fea371-cb69-578d-a3d0-b5c472a84b46
     Loaded: loaded (/etc/systemd/system/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 07:39:04 UTC; 2h 31min ago
   Main PID: 32809 (conmon)
         IO: 156.1M read, 3.7G written
      Tasks: 60 (limit: 100220)
     Memory: 728.7M
        CPU: 1min 23.262s
     CGroup: /system.slice/system-ceph\x2df1fea371\x2dcb69\x2d578d\x2da3d0\x2db5c472a84b46.slice/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@osd.4.service
             ├─libpod-payload-149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e
             │ ├─32811 /run/podman-init -- /usr/bin/ceph-osd -n osd.4 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─32813 /usr/bin/ceph-osd -n osd.4 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─32809 /usr/bin/conmon --api-version 1 -c 149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e -u 149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e/userdata -p /run/containers/storage/overlay-containers/149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-osd-4 --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@osd.4.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e

Feb 23 10:10:34 np0005626466.localdomain ceph-osd[32813]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-02-23T10:10:02.137343+0000)
Feb 23 10:10:34 np0005626466.localdomain ceph-osd[32813]: prioritycache tune_memory target: 3561598361 mapped: 166027264 unmapped: 5718016 heap: 171745280 old mem: 2222052238 new mem: 2222052238
Feb 23 10:10:34 np0005626466.localdomain ceph-osd[32813]: monclient: tick
Feb 23 10:10:34 np0005626466.localdomain ceph-osd[32813]: monclient: _check_auth_tickets
Feb 23 10:10:34 np0005626466.localdomain ceph-osd[32813]: monclient: _check_auth_rotating have uptodate secrets (they expire after 2026-02-23T10:10:03.137657+0000)
Feb 23 10:10:34 np0005626466.localdomain ceph-osd[32813]: prioritycache tune_memory target: 3561598361 mapped: 166199296 unmapped: 5545984 heap: 171745280 old mem: 2222052238 new mem: 2222052238
Feb 23 10:10:34 np0005626466.localdomain ceph-osd[32813]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.307692
Feb 23 10:10:34 np0005626466.localdomain ceph-osd[32813]: rocksdb: commit_cache_size High Pri Pool Ratio set to 0.0727273
Feb 23 10:10:34 np0005626466.localdomain ceph-osd[32813]: bluestore.MempoolThread(0x561408b23b60) _resize_shards cache_size: 2222052238 kv_alloc: 922746880 kv_used: 2144 kv_onode_alloc: 218103808 kv_onode_used: 464 meta_alloc: 855638016 meta_used: 2285450 data_alloc: 184549376 data_used: 9809920
Feb 23 10:10:34 np0005626466.localdomain ceph-osd[32813]: do_command 'log dump' '{prefix=log dump}'

● ceph-osd-losetup-0.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-0.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-23 07:34:18 UTC; 2h 36min ago
   Main PID: 25222 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Feb 23 07:34:18 np0005626466.localdomain systemd[1]: Starting Ceph OSD losetup...
Feb 23 07:34:18 np0005626466.localdomain bash[25223]: /dev/loop3: [64516]:8400144 (/var/lib/ceph-osd-0.img)
Feb 23 07:34:18 np0005626466.localdomain systemd[1]: Finished Ceph OSD losetup.

● ceph-osd-losetup-1.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-1.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-23 07:34:26 UTC; 2h 36min ago
   Main PID: 25470 (code=exited, status=0/SUCCESS)
        CPU: 19ms

Feb 23 07:34:26 np0005626466.localdomain systemd[1]: Starting Ceph OSD losetup...
Feb 23 07:34:26 np0005626466.localdomain bash[25471]: /dev/loop4: [64516]:8399529 (/var/lib/ceph-osd-1.img)
Feb 23 07:34:26 np0005626466.localdomain systemd[1]: Finished Ceph OSD losetup.

○ chrony-online.service - chronyd online sources service
     Loaded: loaded (/etc/systemd/system/chrony-online.service; enabled; preset: disabled)
     Active: inactive (dead) since Mon 2026-02-23 07:49:00 UTC; 2h 22min ago
   Main PID: 45968 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Feb 23 07:49:00 np0005626466.localdomain systemd[1]: Starting chronyd online sources service...
Feb 23 07:49:00 np0005626466.localdomain chronyc[45968]: 200 OK
Feb 23 07:49:00 np0005626466.localdomain systemd[1]: chrony-online.service: Deactivated successfully.
Feb 23 07:49:00 np0005626466.localdomain systemd[1]: Finished chronyd online sources service.

● chronyd.service - NTP client/server
     Loaded: loaded (/usr/lib/systemd/system/chronyd.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-23 09:15:06 UTC; 55min ago
       Docs: man:chronyd(8)
             man:chrony.conf(5)
   Main PID: 139217 (chronyd)
         IO: 44.0K read, 0B written
      Tasks: 1 (limit: 100220)
     Memory: 956.0K
        CPU: 86ms
     CGroup: /system.slice/chronyd.service
             └─139217 /usr/sbin/chronyd -F 2

Feb 23 09:15:06 np0005626466.localdomain systemd[1]: Starting NTP client/server...
Feb 23 09:15:06 np0005626466.localdomain chronyd[139217]: chronyd version 4.3 starting (+CMDMON +NTP +REFCLOCK +RTC +PRIVDROP +SCFILTER +SIGND +ASYNCDNS +NTS +SECHASH +IPV6 +DEBUG)
Feb 23 09:15:06 np0005626466.localdomain chronyd[139217]: Frequency -30.748 +/- 0.200 ppm read from /var/lib/chrony/drift
Feb 23 09:15:06 np0005626466.localdomain chronyd[139217]: Loaded seccomp filter (level 2)
Feb 23 09:15:06 np0005626466.localdomain systemd[1]: Started NTP client/server.
Feb 23 09:17:16 np0005626466.localdomain chronyd[139217]: Selected source 162.159.200.1 (pool.ntp.org)

● cloud-config.service - Apply the settings specified in cloud-config
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-23 06:36:09 UTC; 3h 34min ago
   Main PID: 1130 (code=exited, status=0/SUCCESS)
        CPU: 362ms

Feb 23 06:36:09 np0005626466.novalocal systemd[1]: Starting Apply the settings specified in cloud-config...
Feb 23 06:36:09 np0005626466.novalocal cloud-init[1267]: Cloud-init v. 22.1-9.el9 running 'modules:config' at Mon, 23 Feb 2026 06:36:09 +0000. Up 10.57 seconds.
Feb 23 06:36:09 np0005626466.novalocal systemd[1]: Finished Apply the settings specified in cloud-config.

● cloud-final.service - Execute cloud user/final scripts
     Loaded: loaded (/usr/lib/systemd/system/cloud-final.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-23 06:36:10 UTC; 3h 34min ago
   Main PID: 1338 (code=exited, status=0/SUCCESS)
        CPU: 463ms

Feb 23 06:36:09 np0005626466.novalocal cloud-init[1454]: Cloud-init v. 22.1-9.el9 running 'modules:final' at Mon, 23 Feb 2026 06:36:09 +0000. Up 10.91 seconds.
Feb 23 06:36:09 np0005626466.novalocal cloud-init[1478]: #############################################################
Feb 23 06:36:09 np0005626466.novalocal cloud-init[1484]: -----BEGIN SSH HOST KEY FINGERPRINTS-----
Feb 23 06:36:09 np0005626466.novalocal cloud-init[1489]: 256 SHA256:yuHcNEmj13ZjjOzjyYoSeDjxLoJ9paNcUTPZJ3X3C9M root@np0005626466.novalocal (ECDSA)
Feb 23 06:36:09 np0005626466.novalocal cloud-init[1498]: 256 SHA256:/a2q8DpxpktGxDOKdh/jeDIe+7BZG8pHruXC8lDfXZs root@np0005626466.novalocal (ED25519)
Feb 23 06:36:09 np0005626466.novalocal cloud-init[1506]: 3072 SHA256:mLtdL7pRhieIeMXPHwD9QBWYSmrlfeJg3xNOTFK80dM root@np0005626466.novalocal (RSA)
Feb 23 06:36:09 np0005626466.novalocal cloud-init[1507]: -----END SSH HOST KEY FINGERPRINTS-----
Feb 23 06:36:09 np0005626466.novalocal cloud-init[1509]: #############################################################
Feb 23 06:36:09 np0005626466.novalocal cloud-init[1454]: Cloud-init v. 22.1-9.el9 finished at Mon, 23 Feb 2026 06:36:09 +0000. Datasource DataSourceConfigDrive [net,ver=2][source=/dev/sr0].  Up 11.12 seconds
Feb 23 06:36:10 np0005626466.novalocal systemd[1]: Finished Execute cloud user/final scripts.

● cloud-init-local.service - Initial cloud-init job (pre-networking)
     Loaded: loaded (/usr/lib/systemd/system/cloud-init-local.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-23 06:36:05 UTC; 3h 34min ago
   Main PID: 788 (code=exited, status=0/SUCCESS)
        CPU: 758ms

Feb 23 06:36:04 localhost systemd[1]: Starting Initial cloud-init job (pre-networking)...
Feb 23 06:36:05 localhost cloud-init[770]: Cloud-init v. 22.1-9.el9 running 'init-local' at Mon, 23 Feb 2026 06:36:05 +0000. Up 6.37 seconds.
Feb 23 06:36:05 np0005626466.novalocal systemd[1]: Finished Initial cloud-init job (pre-networking).

● cloud-init.service - Initial cloud-init job (metadata service crawler)
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-23 06:36:09 UTC; 3h 34min ago
   Main PID: 815 (code=exited, status=0/SUCCESS)
         IO: 6.0M read, 5.6M written
      Tasks: 0 (limit: 100220)
     Memory: 5.1M
        CPU: 1.452s
     CGroup: /system.slice/cloud-init.service

Feb 23 06:36:09 np0005626466.novalocal cloud-init[919]: |     .           |
Feb 23 06:36:09 np0005626466.novalocal cloud-init[919]: |      =          |
Feb 23 06:36:09 np0005626466.novalocal cloud-init[919]: |   . o o .       |
Feb 23 06:36:09 np0005626466.novalocal cloud-init[919]: |  o + + S . .    |
Feb 23 06:36:09 np0005626466.novalocal cloud-init[919]: | . o *o+o. o +   |
Feb 23 06:36:09 np0005626466.novalocal cloud-init[919]: |  ..*o%=. . E .  |
Feb 23 06:36:09 np0005626466.novalocal cloud-init[919]: |  .+o^==     .   |
Feb 23 06:36:09 np0005626466.novalocal cloud-init[919]: |   oX=*+o....    |
Feb 23 06:36:09 np0005626466.novalocal cloud-init[919]: +----[SHA256]-----+
Feb 23 06:36:09 np0005626466.novalocal systemd[1]: Finished Initial cloud-init job (metadata service crawler).

○ cpupower.service - Configure CPU power related settings
     Loaded: loaded (/usr/lib/systemd/system/cpupower.service; disabled; preset: disabled)
     Active: inactive (dead)

● crond.service - Command Scheduler
     Loaded: loaded (/usr/lib/systemd/system/crond.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-23 06:36:09 UTC; 3h 34min ago
   Main PID: 1139 (crond)
         IO: 148.0K read, 24.0K written
      Tasks: 1 (limit: 100220)
     Memory: 1.1M
        CPU: 299ms
     CGroup: /system.slice/crond.service
             └─1139 /usr/sbin/crond -n

Feb 23 08:01:01 np0005626466.localdomain CROND[59070]: (root) CMDEND (run-parts /etc/cron.hourly)
Unit display-manager.service could not be found.
Unit docker.service could not be found.
Feb 23 08:18:01 np0005626466.localdomain anacron[19129]: Job `cron.monthly' started
Feb 23 08:18:01 np0005626466.localdomain anacron[19129]: Job `cron.monthly' terminated
Feb 23 08:18:01 np0005626466.localdomain anacron[19129]: Normal exit (3 jobs run)
Feb 23 09:01:01 np0005626466.localdomain CROND[108573]: (root) CMD (run-parts /etc/cron.hourly)
Feb 23 09:01:01 np0005626466.localdomain run-parts[108582]: (/etc/cron.hourly) finished 0anacron
Feb 23 09:01:01 np0005626466.localdomain CROND[108572]: (root) CMDEND (run-parts /etc/cron.hourly)
Feb 23 10:01:01 np0005626466.localdomain CROND[326400]: (root) CMD (run-parts /etc/cron.hourly)
Feb 23 10:01:01 np0005626466.localdomain run-parts[326409]: (/etc/cron.hourly) finished 0anacron
Feb 23 10:01:01 np0005626466.localdomain CROND[326399]: (root) CMDEND (run-parts /etc/cron.hourly)

● dbus-broker.service - D-Bus System Message Bus
     Loaded: loaded (/usr/lib/systemd/system/dbus-broker.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
TriggeredBy: ● dbus.socket
       Docs: man:dbus-broker-launch(1)
   Main PID: 751 (dbus-broker-lau)
         IO: 692.5K read, 0B written
      Tasks: 2 (limit: 100220)
     Memory: 3.0M
        CPU: 11.167s
     CGroup: /system.slice/dbus-broker.service
             ├─751 /usr/bin/dbus-broker-launch --scope system --audit
             └─755 dbus-broker --log 4 --controller 9 --machine-id c0212a8b024a111cfc61293864f36c87 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit

Feb 23 09:09:48 np0005626466.localdomain dbus-broker-launch[755]: avc:  op=load_policy lsm=selinux seqno=15 res=1
Feb 23 09:09:50 np0005626466.localdomain dbus-broker-launch[755]: avc:  op=load_policy lsm=selinux seqno=16 res=1
Feb 23 09:09:50 np0005626466.localdomain dbus-broker-launch[755]: avc:  op=load_policy lsm=selinux seqno=17 res=1
Feb 23 09:13:29 np0005626466.localdomain dbus-broker-launch[755]: avc:  op=load_policy lsm=selinux seqno=18 res=1
Feb 23 09:21:10 np0005626466.localdomain dbus-broker-launch[755]: avc:  op=load_policy lsm=selinux seqno=19 res=1
Feb 23 09:21:20 np0005626466.localdomain dbus-broker-launch[755]: avc:  op=load_policy lsm=selinux seqno=20 res=1
Feb 23 09:21:36 np0005626466.localdomain dbus-broker-launch[755]: avc:  op=load_policy lsm=selinux seqno=22 res=1
Feb 23 09:21:49 np0005626466.localdomain dbus-broker-launch[751]: Noticed file-system modification, trigger reload.
Feb 23 09:21:49 np0005626466.localdomain dbus-broker-launch[755]: avc:  op=load_policy lsm=selinux seqno=23 res=1
Feb 23 09:21:49 np0005626466.localdomain dbus-broker-launch[751]: Noticed file-system modification, trigger reload.

○ dm-event.service - Device-mapper event daemon
     Loaded: loaded (/usr/lib/systemd/system/dm-event.service; static)
     Active: inactive (dead)
TriggeredBy: ● dm-event.socket
       Docs: man:dmeventd(8)

○ dnf-makecache.service - dnf makecache
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.service; static)
     Active: inactive (dead) since Mon 2026-02-23 09:21:38 UTC; 49min ago
TriggeredBy: ● dnf-makecache.timer
   Main PID: 167870 (code=exited, status=0/SUCCESS)
        CPU: 2.090s

Feb 23 09:21:36 np0005626466.localdomain systemd[1]: Starting dnf makecache...
Feb 23 09:21:37 np0005626466.localdomain dnf[167870]: Updating Subscription Management repositories.
Feb 23 09:21:38 np0005626466.localdomain dnf[167870]: Metadata cache refreshed recently.
Feb 23 09:21:38 np0005626466.localdomain systemd[1]: dnf-makecache.service: Deactivated successfully.
Feb 23 09:21:38 np0005626466.localdomain systemd[1]: Finished dnf makecache.
Feb 23 09:21:38 np0005626466.localdomain systemd[1]: dnf-makecache.service: Consumed 2.090s CPU time.

○ dracut-cmdline.service - dracut cmdline hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-cmdline.service; static)
     Active: inactive (dead) since Mon 2026-02-23 06:36:01 UTC; 3h 35min ago
   Duration: 1.478s
       Docs: man:dracut-cmdline.service(8)
             man:dracut.bootup(7)
   Main PID: 291 (code=exited, status=0/SUCCESS)
        CPU: 128ms

Feb 23 06:36:00 localhost systemd[1]: Starting dracut cmdline hook...
Feb 23 06:36:00 localhost dracut-cmdline[291]: dracut-9.2 (Plow) dracut-057-21.git20230214.el9
Feb 23 06:36:00 localhost dracut-cmdline[291]: Using kernel command line parameters:    BOOT_IMAGE=(hd0,gpt3)/vmlinuz-5.14.0-284.11.1.el9_2.x86_64 root=UUID=a3dd82de-ffc6-4652-88b9-80e003b8f20a console=tty0 console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=1G-4G:192M,4G-64G:256M,64G-:512M
Feb 23 06:36:00 localhost systemd[1]: Finished dracut cmdline hook.
Feb 23 06:36:01 localhost systemd[1]: dracut-cmdline.service: Deactivated successfully.
Feb 23 06:36:01 localhost systemd[1]: Stopped dracut cmdline hook.

○ dracut-initqueue.service - dracut initqueue hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-initqueue.service; static)
     Active: inactive (dead) since Mon 2026-02-23 06:36:01 UTC; 3h 35min ago
   Duration: 593ms
       Docs: man:dracut-initqueue.service(8)
             man:dracut.bootup(7)
   Main PID: 466 (code=exited, status=0/SUCCESS)
        CPU: 31ms

Feb 23 06:36:00 localhost systemd[1]: Starting dracut initqueue hook...
Feb 23 06:36:01 localhost systemd[1]: Finished dracut initqueue hook.
Feb 23 06:36:01 localhost systemd[1]: dracut-initqueue.service: Deactivated successfully.
Feb 23 06:36:01 localhost systemd[1]: Stopped dracut initqueue hook.

○ dracut-mount.service - dracut mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-mount.service; static)
     Active: inactive (dead) since Mon 2026-02-23 06:36:01 UTC; 3h 35min ago
   Duration: 109ms
       Docs: man:dracut-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 526 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Feb 23 06:36:01 localhost systemd[1]: Starting dracut mount hook...
Feb 23 06:36:01 localhost systemd[1]: Finished dracut mount hook.
Feb 23 06:36:01 localhost systemd[1]: dracut-mount.service: Deactivated successfully.
Feb 23 06:36:01 localhost systemd[1]: Stopped dracut mount hook.

○ dracut-pre-mount.service - dracut pre-mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-mount.service; static)
     Active: inactive (dead) since Mon 2026-02-23 06:36:01 UTC; 3h 35min ago
   Duration: 559ms
       Docs: man:dracut-pre-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 505 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Feb 23 06:36:01 localhost systemd[1]: Starting dracut pre-mount hook...
Feb 23 06:36:01 localhost systemd[1]: Finished dracut pre-mount hook.
Feb 23 06:36:01 localhost systemd[1]: dracut-pre-mount.service: Deactivated successfully.
Feb 23 06:36:01 localhost systemd[1]: Stopped dracut pre-mount hook.

○ dracut-pre-pivot.service - dracut pre-pivot and cleanup hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-pivot.service; static)
     Active: inactive (dead) since Mon 2026-02-23 06:36:01 UTC; 3h 35min ago
   Duration: 20ms
       Docs: man:dracut-pre-pivot.service(8)
             man:dracut.bootup(7)
   Main PID: 531 (code=exited, status=0/SUCCESS)
        CPU: 67ms

Feb 23 06:36:01 localhost systemd[1]: Starting dracut pre-pivot and cleanup hook...
Feb 23 06:36:01 localhost systemd[1]: Finished dracut pre-pivot and cleanup hook.
Feb 23 06:36:01 localhost systemd[1]: dracut-pre-pivot.service: Deactivated successfully.
Feb 23 06:36:01 localhost systemd[1]: Stopped dracut pre-pivot and cleanup hook.

○ dracut-pre-trigger.service - dracut pre-trigger hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-trigger.service; static)
     Active: inactive (dead) since Mon 2026-02-23 06:36:01 UTC; 3h 35min ago
   Duration: 1.131s
       Docs: man:dracut-pre-trigger.service(8)
             man:dracut.bootup(7)
   Main PID: 432 (code=exited, status=0/SUCCESS)
        CPU: 16ms

Feb 23 06:36:00 localhost systemd[1]: Starting dracut pre-trigger hook...
Feb 23 06:36:00 localhost systemd[1]: Finished dracut pre-trigger hook.
Feb 23 06:36:01 localhost systemd[1]: dracut-pre-trigger.service: Deactivated successfully.
Feb 23 06:36:01 localhost systemd[1]: Stopped dracut pre-trigger hook.

○ dracut-pre-udev.service - dracut pre-udev hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-udev.service; static)
     Active: inactive (dead) since Mon 2026-02-23 06:36:01 UTC; 3h 35min ago
   Duration: 1.203s
       Docs: man:dracut-pre-udev.service(8)
             man:dracut.bootup(7)
   Main PID: 384 (code=exited, status=0/SUCCESS)
        CPU: 283ms

Feb 23 06:36:00 localhost systemd[1]: Starting dracut pre-udev hook...
Feb 23 06:36:00 localhost rpc.statd[408]: Version 2.5.4 starting
Feb 23 06:36:00 localhost rpc.statd[408]: Initializing NSM state
Feb 23 06:36:00 localhost rpc.idmapd[413]: Setting log level to 0
Feb 23 06:36:00 localhost systemd[1]: Finished dracut pre-udev hook.
Feb 23 06:36:01 localhost rpc.idmapd[413]: exiting on signal 15
Feb 23 06:36:01 localhost systemd[1]: dracut-pre-udev.service: Deactivated successfully.
Feb 23 06:36:01 localhost systemd[1]: Stopped dracut pre-udev hook.

○ dracut-shutdown-onfailure.service - Service executing upon dracut-shutdown failure to perform cleanup
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown-onfailure.service; static)
     Active: inactive (dead)
       Docs: man:dracut-shutdown.service(8)

● dracut-shutdown.service - Restore /run/initramfs on shutdown
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown.service; static)
     Active: active (exited) since Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
       Docs: man:dracut-shutdown.service(8)
   Main PID: 757 (code=exited, status=0/SUCCESS)
        CPU: 1ms

Feb 23 06:36:04 localhost systemd[1]: Starting Restore /run/initramfs on shutdown...
Feb 23 06:36:04 localhost systemd[1]: Finished Restore /run/initramfs on shutdown.

● edpm-container-shutdown.service - EDPM Container Shutdown
     Loaded: loaded (/etc/systemd/system/edpm-container-shutdown.service; enabled; preset: enabled)
     Active: active (exited) since Mon 2026-02-23 09:15:25 UTC; 55min ago
       Docs: https://github.com/openstack-k8s-operators/docs
   Main PID: 140759 (code=exited, status=0/SUCCESS)
        CPU: 2ms

Feb 23 09:15:25 np0005626466.localdomain systemd[1]: Starting EDPM Container Shutdown...
Feb 23 09:15:25 np0005626466.localdomain systemd[1]: Finished EDPM Container Shutdown.

● edpm_ceilometer_agent_compute.service - ceilometer_agent_compute container
     Loaded: loaded (/etc/systemd/system/edpm_ceilometer_agent_compute.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 09:28:53 UTC; 42min ago
   Main PID: 236489 (conmon)
         IO: 0B read, 12.0K written
      Tasks: 1 (limit: 100220)
     Memory: 1.2M
        CPU: 175ms
     CGroup: /system.slice/edpm_ceilometer_agent_compute.service
             └─236489 /usr/bin/conmon --api-version 1 -c fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9 -u fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9/userdata -p /run/containers/storage/overlay-containers/fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9/userdata/pidfile -n ceilometer_agent_compute --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9/userdata/oci-log --conmon-pidfile /run/ceilometer_agent_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9

Feb 23 10:10:54 np0005626466.localdomain ceilometer_agent_compute[236489]: 2026-02-23 10:10:54.889 12 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets.drop, no  resources found this cycle poll_and_notify /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:193
Feb 23 10:10:54 np0005626466.localdomain ceilometer_agent_compute[236489]: 2026-02-23 10:10:54.889 12 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.latency, no  resources found this cycle poll_and_notify /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:193
Feb 23 10:10:54 np0005626466.localdomain ceilometer_agent_compute[236489]: 2026-02-23 10:10:54.889 12 DEBUG ceilometer.polling.manager [-] Skip pollster network.incoming.bytes, no  resources found this cycle poll_and_notify /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:193
Feb 23 10:10:54 np0005626466.localdomain ceilometer_agent_compute[236489]: 2026-02-23 10:10:54.889 12 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.usage, no  resources found this cycle poll_and_notify /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:193
Feb 23 10:10:54 np0005626466.localdomain ceilometer_agent_compute[236489]: 2026-02-23 10:10:54.889 12 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.write.bytes, no  resources found this cycle poll_and_notify /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:193
Feb 23 10:10:54 np0005626466.localdomain ceilometer_agent_compute[236489]: 2026-02-23 10:10:54.889 12 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.latency, no  resources found this cycle poll_and_notify /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:193
Feb 23 10:10:54 np0005626466.localdomain ceilometer_agent_compute[236489]: 2026-02-23 10:10:54.890 12 DEBUG ceilometer.polling.manager [-] Skip pollster cpu, no  resources found this cycle poll_and_notify /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:193
Feb 23 10:10:54 np0005626466.localdomain ceilometer_agent_compute[236489]: 2026-02-23 10:10:54.890 12 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.packets, no  resources found this cycle poll_and_notify /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:193
Feb 23 10:10:54 np0005626466.localdomain ceilometer_agent_compute[236489]: 2026-02-23 10:10:54.890 12 DEBUG ceilometer.polling.manager [-] Skip pollster network.outgoing.bytes, no  resources found this cycle poll_and_notify /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:193
Feb 23 10:10:54 np0005626466.localdomain ceilometer_agent_compute[236489]: 2026-02-23 10:10:54.890 12 DEBUG ceilometer.polling.manager [-] Skip pollster disk.device.read.bytes, no  resources found this cycle poll_and_notify /usr/lib/python3.9/site-packages/ceilometer/polling/manager.py:193

○ edpm_libvirt_guests.service - Suspend libvirt Guests in edpm
     Loaded: loaded (/etc/systemd/system/edpm_libvirt_guests.service; enabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:libvirtd(8)
             https://libvirt.org

● edpm_neutron_dhcp_agent.service - neutron_dhcp_agent container
     Loaded: loaded (/etc/systemd/system/edpm_neutron_dhcp_agent.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 09:35:26 UTC; 35min ago
   Main PID: 263819 (conmon)
         IO: 0B read, 24.0K written
      Tasks: 1 (limit: 100220)
     Memory: 664.0K
        CPU: 230ms
     CGroup: /system.slice/edpm_neutron_dhcp_agent.service
             └─263819 /usr/bin/conmon --api-version 1 -c bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d -u bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d/userdata -p /run/containers/storage/overlay-containers/bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d/userdata/pidfile -n neutron_dhcp_agent --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d/userdata/oci-log --conmon-pidfile /run/neutron_dhcp_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d

Feb 23 10:04:58 np0005626466.localdomain neutron_dhcp_agent[263819]: 2026-02-23 10:04:58.941 263823 INFO neutron.agent.dhcp.agent [-] Trigger reload_allocations for port admin_state_up=True, allowed_address_pairs=[], binding:host_id=, binding:profile=, binding:vif_details=, binding:vif_type=unbound, binding:vnic_type=normal, created_at=2026-02-23T10:04:58Z, description=, device_id=003a0537-1e67-4904-bdc1-c9d4f9d3916e, device_owner=network:router_gateway, dns_assignment=[<neutron.agent.linux.dhcp.DictModel object at 0x7f59a3e5de50>], dns_domain=, dns_name=, extra_dhcp_opts=[], fixed_ips=[<neutron.agent.linux.dhcp.DictModel object at 0x7f59a3e5d7f0>], id=4f21aa5f-deca-49b8-b675-2cddb2a67420, ip_allocation=immediate, mac_address=fa:16:3e:32:0c:aa, name=, network=admin_state_up=True, availability_zone_hints=[], availability_zones=[], created_at=2026-02-23T08:22:23Z, description=, dns_domain=, id=91fdc6a7-b901-4255-83f7-4b37365658a3, ipv4_address_scope=None, ipv6_address_scope=None, is_default=False, l2_adjacency=True, mtu=1350, name=public, port_security_enabled=True, project_id=37b8098efb0d4ecc90b451a2db0e966f, provider:network_type=flat, provider:physical_network=datacentre, provider:segmentation_id=None, qos_policy_id=None, revision_number=2, router:external=True, shared=False, standard_attr_id=30, status=ACTIVE, subnets=['21d77760-119f-4466-bc85-a0e9167487a9'], tags=[], tenant_id=37b8098efb0d4ecc90b451a2db0e966f, updated_at=2026-02-23T08:22:29Z, vlan_transparent=None, network_id=91fdc6a7-b901-4255-83f7-4b37365658a3, port_security_enabled=False, project_id=, qos_network_policy_id=None, qos_policy_id=None, resource_request=None, revision_number=1, security_groups=[], standard_attr_id=3698, status=DOWN, tags=[], tenant_id=, updated_at=2026-02-23T10:04:58Z on network 91fdc6a7-b901-4255-83f7-4b37365658a3[00m
Feb 23 10:04:59 np0005626466.localdomain neutron_dhcp_agent[263819]: 2026-02-23 10:04:59.430 263823 INFO neutron.agent.dhcp.agent [None req-8906b59a-7d81-4262-abdb-5b4aa4f79a49 - - - - - -] DHCP configuration for ports {'4f21aa5f-deca-49b8-b675-2cddb2a67420'} is completed[00m
Feb 23 10:05:04 np0005626466.localdomain neutron_dhcp_agent[263819]: 2026-02-23 10:05:04.686 263823 INFO neutron.agent.dhcp.agent [-] Trigger reload_allocations for port admin_state_up=True, allowed_address_pairs=[], binding:host_id=, binding:profile=, binding:vif_details=, binding:vif_type=unbound, binding:vnic_type=normal, created_at=2026-02-23T10:05:04Z, description=, device_id=9a677207-db32-4b9e-abaa-4a936b2ee47c, device_owner=network:router_gateway, dns_assignment=[<neutron.agent.linux.dhcp.DictModel object at 0x7f59a87ea160>], dns_domain=, dns_name=, extra_dhcp_opts=[], fixed_ips=[<neutron.agent.linux.dhcp.DictModel object at 0x7f59a87ea790>], id=d4f68b7a-b506-489f-95a0-7047b8969383, ip_allocation=immediate, mac_address=fa:16:3e:d2:73:b6, name=, network=admin_state_up=True, availability_zone_hints=[], availability_zones=[], created_at=2026-02-23T08:22:23Z, description=, dns_domain=, id=91fdc6a7-b901-4255-83f7-4b37365658a3, ipv4_address_scope=None, ipv6_address_scope=None, is_default=False, l2_adjacency=True, mtu=1350, name=public, port_security_enabled=True, project_id=37b8098efb0d4ecc90b451a2db0e966f, provider:network_type=flat, provider:physical_network=datacentre, provider:segmentation_id=None, qos_policy_id=None, revision_number=2, router:external=True, shared=False, standard_attr_id=30, status=ACTIVE, subnets=['21d77760-119f-4466-bc85-a0e9167487a9'], tags=[], tenant_id=37b8098efb0d4ecc90b451a2db0e966f, updated_at=2026-02-23T08:22:29Z, vlan_transparent=None, network_id=91fdc6a7-b901-4255-83f7-4b37365658a3, port_security_enabled=False, project_id=, qos_network_policy_id=None, qos_policy_id=None, resource_request=None, revision_number=1, security_groups=[], standard_attr_id=3727, status=DOWN, tags=[], tenant_id=, updated_at=2026-02-23T10:05:04Z on network 91fdc6a7-b901-4255-83f7-4b37365658a3[00m
Feb 23 10:05:05 np0005626466.localdomain neutron_dhcp_agent[263819]: 2026-02-23 10:05:05.213 263823 INFO neutron.agent.dhcp.agent [None req-643fa753-0395-422f-9396-798d664a17d9 - - - - - -] DHCP configuration for ports {'d4f68b7a-b506-489f-95a0-7047b8969383'} is completed[00m
Feb 23 10:05:21 np0005626466.localdomain neutron_dhcp_agent[263819]: 2026-02-23 10:05:21.220 263823 INFO neutron.agent.dhcp.agent [-] Trigger reload_allocations for port admin_state_up=True, allowed_address_pairs=[], binding:host_id=, binding:profile=, binding:vif_details=, binding:vif_type=unbound, binding:vnic_type=normal, created_at=2026-02-23T10:05:21Z, description=, device_id=36bd3448-ec4f-40e6-b201-5bd21215f6b7, device_owner=network:router_gateway, dns_assignment=[<neutron.agent.linux.dhcp.DictModel object at 0x7f59a3f55e80>], dns_domain=, dns_name=, extra_dhcp_opts=[], fixed_ips=[<neutron.agent.linux.dhcp.DictModel object at 0x7f59a3f32070>], id=40eaa0c6-ecb8-403e-826e-17c2df6c0242, ip_allocation=immediate, mac_address=fa:16:3e:56:b5:18, name=, network=admin_state_up=True, availability_zone_hints=[], availability_zones=[], created_at=2026-02-23T08:22:23Z, description=, dns_domain=, id=91fdc6a7-b901-4255-83f7-4b37365658a3, ipv4_address_scope=None, ipv6_address_scope=None, is_default=False, l2_adjacency=True, mtu=1350, name=public, port_security_enabled=True, project_id=37b8098efb0d4ecc90b451a2db0e966f, provider:network_type=flat, provider:physical_network=datacentre, provider:segmentation_id=None, qos_policy_id=None, revision_number=2, router:external=True, shared=False, standard_attr_id=30, status=ACTIVE, subnets=['21d77760-119f-4466-bc85-a0e9167487a9'], tags=[], tenant_id=37b8098efb0d4ecc90b451a2db0e966f, updated_at=2026-02-23T08:22:29Z, vlan_transparent=None, network_id=91fdc6a7-b901-4255-83f7-4b37365658a3, port_security_enabled=False, project_id=, qos_network_policy_id=None, qos_policy_id=None, resource_request=None, revision_number=1, security_groups=[], standard_attr_id=3741, status=DOWN, tags=[], tenant_id=, updated_at=2026-02-23T10:05:21Z on network 91fdc6a7-b901-4255-83f7-4b37365658a3[00m
Feb 23 10:05:21 np0005626466.localdomain neutron_dhcp_agent[263819]: 2026-02-23 10:05:21.885 263823 INFO neutron.agent.dhcp.agent [None req-51807f71-5b51-4f44-9a68-7f90992222c9 - - - - - -] DHCP configuration for ports {'40eaa0c6-ecb8-403e-826e-17c2df6c0242'} is completed[00m
Feb 23 10:05:29 np0005626466.localdomain neutron_dhcp_agent[263819]: 2026-02-23 10:05:29.064 263823 INFO neutron.agent.dhcp.agent [-] Trigger reload_allocations for port admin_state_up=True, allowed_address_pairs=[], binding:host_id=, binding:profile=, binding:vif_details=, binding:vif_type=unbound, binding:vnic_type=normal, created_at=2026-02-23T10:05:28Z, description=, device_id=bf770441-f389-4f92-aa3a-271368cb88d2, device_owner=network:router_gateway, dns_assignment=[<neutron.agent.linux.dhcp.DictModel object at 0x7f59a3dc6430>], dns_domain=, dns_name=, extra_dhcp_opts=[], fixed_ips=[<neutron.agent.linux.dhcp.DictModel object at 0x7f59a3dc62e0>], id=165e8114-979b-4974-bfa2-9b3b69757979, ip_allocation=immediate, mac_address=fa:16:3e:a9:ff:cc, name=, network=admin_state_up=True, availability_zone_hints=[], availability_zones=[], created_at=2026-02-23T08:22:23Z, description=, dns_domain=, id=91fdc6a7-b901-4255-83f7-4b37365658a3, ipv4_address_scope=None, ipv6_address_scope=None, is_default=False, l2_adjacency=True, mtu=1350, name=public, port_security_enabled=True, project_id=37b8098efb0d4ecc90b451a2db0e966f, provider:network_type=flat, provider:physical_network=datacentre, provider:segmentation_id=None, qos_policy_id=None, revision_number=2, router:external=True, shared=False, standard_attr_id=30, status=ACTIVE, subnets=['21d77760-119f-4466-bc85-a0e9167487a9'], tags=[], tenant_id=37b8098efb0d4ecc90b451a2db0e966f, updated_at=2026-02-23T08:22:29Z, vlan_transparent=None, network_id=91fdc6a7-b901-4255-83f7-4b37365658a3, port_security_enabled=False, project_id=, qos_network_policy_id=None, qos_policy_id=None, resource_request=None, revision_number=1, security_groups=[], standard_attr_id=3754, status=DOWN, tags=[], tenant_id=, updated_at=2026-02-23T10:05:28Z on network 91fdc6a7-b901-4255-83f7-4b37365658a3[00m
Feb 23 10:05:29 np0005626466.localdomain neutron_dhcp_agent[263819]: 2026-02-23 10:05:29.561 263823 INFO neutron.agent.dhcp.agent [None req-86c881ba-195e-4a5a-b0e5-7cff49ea33fe - - - - - -] DHCP configuration for ports {'165e8114-979b-4974-bfa2-9b3b69757979'} is completed[00m
Feb 23 10:08:18 np0005626466.localdomain neutron_dhcp_agent[263819]: 2026-02-23 10:08:18.971 263823 INFO neutron.agent.dhcp.agent [-] Trigger reload_allocations for port admin_state_up=True, allowed_address_pairs=[], binding:host_id=, binding:profile=, binding:vif_details=, binding:vif_type=unbound, binding:vnic_type=normal, created_at=2026-02-23T10:08:18Z, description=, device_id=bd79233f-62a2-4798-82cd-a3699b0b3389, device_owner=network:router_gateway, dns_assignment=[<neutron.agent.linux.dhcp.DictModel object at 0x7f59a3d86c10>], dns_domain=, dns_name=, extra_dhcp_opts=[], fixed_ips=[<neutron.agent.linux.dhcp.DictModel object at 0x7f59a3d86ac0>], id=4d914d76-d293-4350-af7f-4f32c6566ccf, ip_allocation=immediate, mac_address=fa:16:3e:a6:dd:c6, name=, network=admin_state_up=True, availability_zone_hints=[], availability_zones=[], created_at=2026-02-23T08:22:23Z, description=, dns_domain=, id=91fdc6a7-b901-4255-83f7-4b37365658a3, ipv4_address_scope=None, ipv6_address_scope=None, is_default=False, l2_adjacency=True, mtu=1350, name=public, port_security_enabled=True, project_id=37b8098efb0d4ecc90b451a2db0e966f, provider:network_type=flat, provider:physical_network=datacentre, provider:segmentation_id=None, qos_policy_id=None, revision_number=2, router:external=True, shared=False, standard_attr_id=30, status=ACTIVE, subnets=['21d77760-119f-4466-bc85-a0e9167487a9'], tags=[], tenant_id=37b8098efb0d4ecc90b451a2db0e966f, updated_at=2026-02-23T08:22:29Z, vlan_transparent=None, network_id=91fdc6a7-b901-4255-83f7-4b37365658a3, port_security_enabled=False, project_id=, qos_network_policy_id=None, qos_policy_id=None, resource_request=None, revision_number=1, security_groups=[], standard_attr_id=3949, status=DOWN, tags=[], tenant_id=, updated_at=2026-02-23T10:08:18Z on network 91fdc6a7-b901-4255-83f7-4b37365658a3[00m
Feb 23 10:08:19 np0005626466.localdomain neutron_dhcp_agent[263819]: 2026-02-23 10:08:19.378 263823 INFO neutron.agent.dhcp.agent [None req-3471c372-de65-4ed2-9926-182c141c8ab8 - - - - - -] DHCP configuration for ports {'4d914d76-d293-4350-af7f-4f32c6566ccf'} is completed[00m

● edpm_neutron_sriov_agent.service - neutron_sriov_agent container
     Loaded: loaded (/etc/systemd/system/edpm_neutron_sriov_agent.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 09:34:19 UTC; 36min ago
   Main PID: 256425 (conmon)
         IO: 0B read, 20.0K written
      Tasks: 1 (limit: 100220)
     Memory: 636.0K
        CPU: 190ms
     CGroup: /system.slice/edpm_neutron_sriov_agent.service
             └─256425 /usr/bin/conmon --api-version 1 -c 3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb -u 3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb/userdata -p /run/containers/storage/overlay-containers/3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb/userdata/pidfile -n neutron_sriov_agent --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb/userdata/oci-log --conmon-pidfile /run/neutron_sriov_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb

Feb 23 10:03:05 np0005626466.localdomain neutron_sriov_agent[256425]: 2026-02-23 10:03:05.920 2 INFO neutron.agent.securitygroups_rpc [None req-8d452d14-dd60-417e-87bd-a7a2372759de a9be4932f1a84a8293065e9227797a47 d45d0b9da54741348d1d12c73041586e - - default default] Security group rule updated ['33c4ddfa-59ae-40a7-8c2f-cf1ffe09eb9f'][00m
Feb 23 10:03:06 np0005626466.localdomain neutron_sriov_agent[256425]: 2026-02-23 10:03:06.750 2 INFO neutron.agent.securitygroups_rpc [None req-43388687-9a00-4563-af08-790772899ea4 a9be4932f1a84a8293065e9227797a47 d45d0b9da54741348d1d12c73041586e - - default default] Security group rule updated ['e9d6e743-53d6-4e9c-950f-ebadc1a82c0f'][00m
Feb 23 10:03:06 np0005626466.localdomain neutron_sriov_agent[256425]: 2026-02-23 10:03:06.868 2 INFO neutron.agent.securitygroups_rpc [None req-8bc11e13-0374-4d0f-a420-dc9bed7775c4 a9be4932f1a84a8293065e9227797a47 d45d0b9da54741348d1d12c73041586e - - default default] Security group rule updated ['e9d6e743-53d6-4e9c-950f-ebadc1a82c0f'][00m
Feb 23 10:03:07 np0005626466.localdomain neutron_sriov_agent[256425]: 2026-02-23 10:03:07.694 2 INFO neutron.agent.securitygroups_rpc [None req-7032d099-2d57-4911-980c-bd2a477e3e37 a9be4932f1a84a8293065e9227797a47 d45d0b9da54741348d1d12c73041586e - - default default] Security group rule updated ['b2b511b6-235a-4475-b039-adf8e8bf337f'][00m
Feb 23 10:03:07 np0005626466.localdomain neutron_sriov_agent[256425]: 2026-02-23 10:03:07.904 2 INFO neutron.agent.securitygroups_rpc [None req-09a1ef76-51b7-437b-b37e-bb449ab05579 a9be4932f1a84a8293065e9227797a47 d45d0b9da54741348d1d12c73041586e - - default default] Security group rule updated ['b2b511b6-235a-4475-b039-adf8e8bf337f'][00m
Feb 23 10:03:08 np0005626466.localdomain neutron_sriov_agent[256425]: 2026-02-23 10:03:08.080 2 INFO neutron.agent.securitygroups_rpc [None req-6957b0df-fd0d-456d-b289-5c8ae6b2d3ab a9be4932f1a84a8293065e9227797a47 d45d0b9da54741348d1d12c73041586e - - default default] Security group rule updated ['b2b511b6-235a-4475-b039-adf8e8bf337f'][00m
Feb 23 10:03:08 np0005626466.localdomain neutron_sriov_agent[256425]: 2026-02-23 10:03:08.358 2 INFO neutron.agent.securitygroups_rpc [None req-2e64ea36-7ced-48d5-9b78-d6c2b3b8afa9 a9be4932f1a84a8293065e9227797a47 d45d0b9da54741348d1d12c73041586e - - default default] Security group rule updated ['b2b511b6-235a-4475-b039-adf8e8bf337f'][00m
Feb 23 10:03:08 np0005626466.localdomain neutron_sriov_agent[256425]: 2026-02-23 10:03:08.645 2 INFO neutron.agent.securitygroups_rpc [None req-ac68416c-3f8b-4a61-93f4-c722d3303f27 a9be4932f1a84a8293065e9227797a47 d45d0b9da54741348d1d12c73041586e - - default default] Security group rule updated ['b2b511b6-235a-4475-b039-adf8e8bf337f'][00m
Feb 23 10:03:08 np0005626466.localdomain neutron_sriov_agent[256425]: 2026-02-23 10:03:08.908 2 INFO neutron.agent.securitygroups_rpc [None req-b0017ab9-a450-4286-8fae-6ccd40fd4966 a9be4932f1a84a8293065e9227797a47 d45d0b9da54741348d1d12c73041586e - - default default] Security group rule updated ['b2b511b6-235a-4475-b039-adf8e8bf337f'][00m
Feb 23 10:03:09 np0005626466.localdomain neutron_sriov_agent[256425]: 2026-02-23 10:03:09.746 2 INFO neutron.agent.securitygroups_rpc [None req-b5ba1047-11d6-4902-ae19-bf3c18cfb931 a9be4932f1a84a8293065e9227797a47 d45d0b9da54741348d1d12c73041586e - - default default] Security group rule updated ['9ad178d0-3a41-40dd-be58-0e7ebb53d59d'][00m

● edpm_node_exporter.service - node_exporter container
     Loaded: loaded (/etc/systemd/system/edpm_node_exporter.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 09:29:13 UTC; 41min ago
   Main PID: 238753 (conmon)
         IO: 0B read, 24.0K written
      Tasks: 1 (limit: 100220)
     Memory: 1.2M
        CPU: 173ms
     CGroup: /system.slice/edpm_node_exporter.service
             └─238753 /usr/bin/conmon --api-version 1 -c cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52 -u cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52/userdata -p /run/containers/storage/overlay-containers/cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52/userdata/pidfile -n node_exporter --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52/userdata/oci-log --conmon-pidfile /run/node_exporter.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52

Feb 23 09:29:13 np0005626466.localdomain node_exporter[238753]: ts=2026-02-23T09:29:13.320Z caller=node_exporter.go:117 level=info collector=tapestats
Feb 23 09:29:13 np0005626466.localdomain node_exporter[238753]: ts=2026-02-23T09:29:13.320Z caller=node_exporter.go:117 level=info collector=udp_queues
Feb 23 09:29:13 np0005626466.localdomain node_exporter[238753]: ts=2026-02-23T09:29:13.320Z caller=node_exporter.go:117 level=info collector=vmstat
Feb 23 09:29:13 np0005626466.localdomain node_exporter[238753]: ts=2026-02-23T09:29:13.320Z caller=node_exporter.go:117 level=info collector=xfs
Feb 23 09:29:13 np0005626466.localdomain node_exporter[238753]: ts=2026-02-23T09:29:13.320Z caller=node_exporter.go:117 level=info collector=zfs
Feb 23 09:29:13 np0005626466.localdomain node_exporter[238753]: ts=2026-02-23T09:29:13.321Z caller=tls_config.go:232 level=info msg="Listening on" address=[::]:9100
Feb 23 09:29:13 np0005626466.localdomain node_exporter[238753]: ts=2026-02-23T09:29:13.321Z caller=tls_config.go:235 level=info msg="TLS is disabled." http2=false address=[::]:9100
Feb 23 09:29:13 np0005626466.localdomain podman[238738]: 2026-02-23 09:29:13.335098481 +0000 UTC m=+0.177969085 container start cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52 (image=quay.io/prometheus/node-exporter@sha256:39c642b2b337e38c18e80266fb14383754178202f40103646337722a594d984c, name=node_exporter, config_data={'command': ['--web.disable-exporter-metrics', '--collector.systemd', '--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service', '--no-collector.dmi', '--no-collector.entropy', '--no-collector.thermal_zone', '--no-collector.time', '--no-collector.timex', '--no-collector.uname', '--no-collector.stat', '--no-collector.hwmon', '--no-collector.os', '--no-collector.selinux', '--no-collector.textfile', '--no-collector.powersupplyclass', '--no-collector.pressure', '--no-collector.rapl', '--path.rootfs=/rootfs'], 'environment': {'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/node_exporter', 'test': '/openstack/healthcheck node_exporter'}, 'image': 'quay.io/prometheus/node-exporter@sha256:39c642b2b337e38c18e80266fb14383754178202f40103646337722a594d984c', 'net': 'host', 'ports': ['9100:9100'], 'privileged': True, 'recreate': True, 'restart': 'always', 'user': 'root', 'volumes': ['/var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:rw', '/:/rootfs:ro', '/var/lib/openstack/healthchecks/node_exporter:/openstack:ro,z']}, config_id=node_exporter, container_name=node_exporter, maintainer=The Prometheus Authors <prometheus-developers@googlegroups.com>, managed_by=edpm_ansible)
Feb 23 09:29:13 np0005626466.localdomain podman[238738]: node_exporter
Feb 23 09:29:13 np0005626466.localdomain systemd[1]: Started node_exporter container.

● edpm_nova_compute.service - nova_compute container
     Loaded: loaded (/etc/systemd/system/edpm_nova_compute.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 09:39:05 UTC; 31min ago
   Main PID: 280690 (conmon)
         IO: 0B read, 24.0K written
      Tasks: 1 (limit: 100220)
     Memory: 664.0K
        CPU: 561ms
     CGroup: /system.slice/edpm_nova_compute.service
             └─280690 /usr/bin/conmon --api-version 1 -c 2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79 -u 2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79/userdata -p /run/containers/storage/overlay-containers/2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79

Feb 23 10:10:51 np0005626466.localdomain nova_compute[280690]: 2026-02-23 10:10:51.646 280697 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] tcp:127.0.0.1:6640: entering IDLE _transition /usr/lib64/python3.9/site-packages/ovs/reconnect.py:519[00m
Feb 23 10:10:51 np0005626466.localdomain nova_compute[280690]: 2026-02-23 10:10:51.676 280697 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 26 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 23 10:10:51 np0005626466.localdomain nova_compute[280690]: 2026-02-23 10:10:51.677 280697 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] tcp:127.0.0.1:6640: entering ACTIVE _transition /usr/lib64/python3.9/site-packages/ovs/reconnect.py:519[00m
Feb 23 10:10:56 np0005626466.localdomain nova_compute[280690]: 2026-02-23 10:10:56.678 280697 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 4997-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248[00m
Feb 23 10:11:01 np0005626466.localdomain nova_compute[280690]: 2026-02-23 10:11:01.680 280697 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 4997-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248[00m
Feb 23 10:11:01 np0005626466.localdomain nova_compute[280690]: 2026-02-23 10:11:01.682 280697 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 26 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 23 10:11:01 np0005626466.localdomain nova_compute[280690]: 2026-02-23 10:11:01.682 280697 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] tcp:127.0.0.1:6640: idle 5002 ms, sending inactivity probe run /usr/lib64/python3.9/site-packages/ovs/reconnect.py:117[00m
Feb 23 10:11:01 np0005626466.localdomain nova_compute[280690]: 2026-02-23 10:11:01.682 280697 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] tcp:127.0.0.1:6640: entering IDLE _transition /usr/lib64/python3.9/site-packages/ovs/reconnect.py:519[00m
Feb 23 10:11:01 np0005626466.localdomain nova_compute[280690]: 2026-02-23 10:11:01.682 280697 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] tcp:127.0.0.1:6640: entering ACTIVE _transition /usr/lib64/python3.9/site-packages/ovs/reconnect.py:519[00m
Feb 23 10:11:01 np0005626466.localdomain nova_compute[280690]: 2026-02-23 10:11:01.684 280697 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 26 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m

● edpm_openstack_network_exporter.service - openstack_network_exporter container
     Loaded: loaded (/etc/systemd/system/edpm_openstack_network_exporter.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 09:30:20 UTC; 40min ago
   Main PID: 243610 (conmon)
         IO: 0B read, 24.0K written
      Tasks: 1 (limit: 100220)
     Memory: 1.2M
        CPU: 237ms
     CGroup: /system.slice/edpm_openstack_network_exporter.service
             └─243610 /usr/bin/conmon --api-version 1 -c 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e -u 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e/userdata -p /run/containers/storage/overlay-containers/6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e/userdata/pidfile -n openstack_network_exporter --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e/userdata/oci-log --conmon-pidfile /run/openstack_network_exporter.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e

Feb 23 10:09:59 np0005626466.localdomain openstack_network_exporter[243610]: ERROR   10:09:59 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Feb 23 10:09:59 np0005626466.localdomain openstack_network_exporter[243610]: 
Feb 23 10:10:29 np0005626466.localdomain openstack_network_exporter[243610]: ERROR   10:10:29 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Feb 23 10:10:29 np0005626466.localdomain openstack_network_exporter[243610]: 
Feb 23 10:10:29 np0005626466.localdomain openstack_network_exporter[243610]: ERROR   10:10:29 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Feb 23 10:10:29 np0005626466.localdomain openstack_network_exporter[243610]: 
Feb 23 10:10:59 np0005626466.localdomain openstack_network_exporter[243610]: ERROR   10:10:59 appctl.go:174: call(dpif-netdev/pmd-rxq-show): please specify an existing datapath
Feb 23 10:10:59 np0005626466.localdomain openstack_network_exporter[243610]: 
Feb 23 10:10:59 np0005626466.localdomain openstack_network_exporter[243610]: ERROR   10:10:59 appctl.go:174: call(dpif-netdev/pmd-perf-show): please specify an existing datapath
Feb 23 10:10:59 np0005626466.localdomain openstack_network_exporter[243610]: 

● edpm_ovn_controller.service - ovn_controller container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_controller.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 09:18:39 UTC; 52min ago
   Main PID: 156066 (conmon)
         IO: 0B read, 24.0K written
      Tasks: 1 (limit: 100220)
     Memory: 1.2M
        CPU: 375ms
     CGroup: /system.slice/edpm_ovn_controller.service
             └─156066 /usr/bin/conmon --api-version 1 -c 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4 -u 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4/userdata -p /run/containers/storage/overlay-containers/122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4

Feb 23 10:02:41 np0005626466.localdomain ovn_controller[156066]: 2026-02-23T10:02:41Z|00297|binding|INFO|Claiming lport 959298c0-2816-4160-b25b-a3decab2cf50 for this chassis.
Feb 23 10:02:41 np0005626466.localdomain ovn_controller[156066]: 2026-02-23T10:02:41Z|00298|binding|INFO|959298c0-2816-4160-b25b-a3decab2cf50: Claiming unknown
Feb 23 10:02:41 np0005626466.localdomain ovn_controller[156066]: 2026-02-23T10:02:41Z|00299|binding|INFO|Setting lport 959298c0-2816-4160-b25b-a3decab2cf50 ovn-installed in OVS
Feb 23 10:02:41 np0005626466.localdomain ovn_controller[156066]: 2026-02-23T10:02:41Z|00300|binding|INFO|Setting lport 959298c0-2816-4160-b25b-a3decab2cf50 up in Southbound
Feb 23 10:02:46 np0005626466.localdomain ovn_controller[156066]: 2026-02-23T10:02:46Z|00301|binding|INFO|Releasing lport 959298c0-2816-4160-b25b-a3decab2cf50 from this chassis (sb_readonly=0)
Feb 23 10:02:46 np0005626466.localdomain ovn_controller[156066]: 2026-02-23T10:02:46Z|00302|binding|INFO|Setting lport 959298c0-2816-4160-b25b-a3decab2cf50 down in Southbound
Feb 23 10:03:43 np0005626466.localdomain ovn_controller[156066]: 2026-02-23T10:03:43Z|00303|memory_trim|INFO|Detected inactivity (last active 30001 ms ago): trimming memory
Feb 23 10:04:54 np0005626466.localdomain ovn_controller[156066]: 2026-02-23T10:04:54Z|00304|memory_trim|INFO|Detected inactivity (last active 30001 ms ago): trimming memory
Feb 23 10:06:14 np0005626466.localdomain ovn_controller[156066]: 2026-02-23T10:06:14Z|00305|memory_trim|INFO|Detected inactivity (last active 30004 ms ago): trimming memory
Feb 23 10:08:59 np0005626466.localdomain ovn_controller[156066]: 2026-02-23T10:08:59Z|00306|memory_trim|INFO|Detected inactivity (last active 30005 ms ago): trimming memory

● edpm_ovn_metadata_agent.service - ovn_metadata_agent container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_metadata_agent.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 09:19:49 UTC; 51min ago
   Main PID: 161939 (conmon)
         IO: 0B read, 24.0K written
      Tasks: 1 (limit: 100220)
     Memory: 1.2M
        CPU: 455ms
     CGroup: /system.slice/edpm_ovn_metadata_agent.service
             └─161939 /usr/bin/conmon --api-version 1 -c 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e -u 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e/userdata -p /run/containers/storage/overlay-containers/8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e

Feb 23 10:08:12 np0005626466.localdomain ovn_metadata_agent[161939]: 2026-02-23 10:08:12.733 161944 DEBUG ovsdbapp.backend.ovs_idl.transaction [-] Running txn n=1 command(idx=0): DbSetCommand(_result=None, table=Chassis_Private, record=0983c4cd-1476-49af-89e0-3187e18b9de6, col_values=(('external_ids', {'neutron:ovn-metadata-sb-cfg': '27'}),), if_exists=True) do_commit /usr/lib/python3.9/site-packages/ovsdbapp/backend/ovs_idl/transaction.py:89[00m
Feb 23 10:08:50 np0005626466.localdomain ovn_metadata_agent[161939]: 2026-02-23 10:08:50.132 161944 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Feb 23 10:08:50 np0005626466.localdomain ovn_metadata_agent[161939]: 2026-02-23 10:08:50.133 161944 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Feb 23 10:08:50 np0005626466.localdomain ovn_metadata_agent[161939]: 2026-02-23 10:08:50.133 161944 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Feb 23 10:09:50 np0005626466.localdomain ovn_metadata_agent[161939]: 2026-02-23 10:09:50.133 161944 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Feb 23 10:09:50 np0005626466.localdomain ovn_metadata_agent[161939]: 2026-02-23 10:09:50.134 161944 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Feb 23 10:09:50 np0005626466.localdomain ovn_metadata_agent[161939]: 2026-02-23 10:09:50.134 161944 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Feb 23 10:10:50 np0005626466.localdomain ovn_metadata_agent[161939]: 2026-02-23 10:10:50.134 161944 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Feb 23 10:10:50 np0005626466.localdomain ovn_metadata_agent[161939]: 2026-02-23 10:10:50.135 161944 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Feb 23 10:10:50 np0005626466.localdomain ovn_metadata_agent[161939]: 2026-02-23 10:10:50.135 161944 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m

● edpm_podman_exporter.service - podman_exporter container
     Loaded: loaded (/etc/systemd/system/edpm_podman_exporter.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 09:29:34 UTC; 41min ago
   Main PID: 241164 (conmon)
         IO: 0B read, 24.0K written
      Tasks: 1 (limit: 100220)
     Memory: 1.2M
        CPU: 181ms
     CGroup: /system.slice/edpm_podman_exporter.service
             └─241164 /usr/bin/conmon --api-version 1 -c c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8 -u c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8/userdata -p /run/containers/storage/overlay-containers/c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8/userdata/pidfile -n podman_exporter --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8/userdata/oci-log --conmon-pidfile /run/podman_exporter.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8

Feb 23 09:29:34 np0005626466.localdomain podman_exporter[241164]: ts=2026-02-23T09:29:34.968Z caller=exporter.go:68 level=info msg="Starting podman-prometheus-exporter" version="(version=1.10.1, branch=HEAD, revision=1)"
Feb 23 09:29:34 np0005626466.localdomain podman_exporter[241164]: ts=2026-02-23T09:29:34.968Z caller=exporter.go:69 level=info msg=metrics enhanced=false
Feb 23 09:29:34 np0005626466.localdomain podman_exporter[241164]: ts=2026-02-23T09:29:34.968Z caller=handler.go:94 level=info msg="enabled collectors"
Feb 23 09:29:34 np0005626466.localdomain podman_exporter[241164]: ts=2026-02-23T09:29:34.968Z caller=handler.go:105 level=info collector=container
Feb 23 09:29:34 np0005626466.localdomain podman[241150]: 2026-02-23 09:29:34.979285252 +0000 UTC m=+0.195247118 container start c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8 (image=quay.io/navidys/prometheus-podman-exporter@sha256:d339ba049bbd1adccb795962bf163f5b22fd84dea865d88b9eb525e46247d6bd, name=podman_exporter, config_id=podman_exporter, container_name=podman_exporter, maintainer=Navid Yaghoobi <navidys@fedoraproject.org>, managed_by=edpm_ansible, config_data={'environment': {'CONTAINER_HOST': 'unix:///run/podman/podman.sock', 'OS_ENDPOINT_TYPE': 'internal'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/podman_exporter', 'test': '/openstack/healthcheck podman_exporter'}, 'image': 'quay.io/navidys/prometheus-podman-exporter@sha256:d339ba049bbd1adccb795962bf163f5b22fd84dea865d88b9eb525e46247d6bd', 'net': 'host', 'ports': ['9882:9882'], 'privileged': True, 'recreate': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/podman/podman.sock:/run/podman/podman.sock:rw,z', '/var/lib/openstack/healthchecks/podman_exporter:/openstack:ro,z']})
Feb 23 09:29:34 np0005626466.localdomain podman[241150]: podman_exporter
Unit fcoe.service could not be found.
Feb 23 09:29:34 np0005626466.localdomain systemd[1]: Started podman_exporter container.
Feb 23 09:33:55 np0005626466.localdomain podman_exporter[241164]: ts=2026-02-23T09:33:55.083Z caller=exporter.go:96 level=info msg="Listening on" address=:9882
Feb 23 09:33:55 np0005626466.localdomain podman_exporter[241164]: ts=2026-02-23T09:33:55.083Z caller=tls_config.go:313 level=info msg="Listening on" address=[::]:9882
Feb 23 09:33:55 np0005626466.localdomain podman_exporter[241164]: ts=2026-02-23T09:33:55.083Z caller=tls_config.go:316 level=info msg="TLS is disabled." http2=false address=[::]:9882

○ emergency.service - Emergency Shell
     Loaded: loaded (/usr/lib/systemd/system/emergency.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

○ fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9.service - /usr/bin/podman healthcheck run fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9
     Loaded: loaded (/run/systemd/transient/fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9.service; transient)
  Transient: yes
     Active: inactive (dead) since Mon 2026-02-23 10:10:56 UTC; 7s ago
   Duration: 178ms
TriggeredBy: ● fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9.timer
    Process: 339082 ExecStart=/usr/bin/podman healthcheck run fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9 (code=exited, status=0/SUCCESS)
   Main PID: 339082 (code=exited, status=0/SUCCESS)
        CPU: 126ms

Feb 23 10:10:56 np0005626466.localdomain systemd[1]: Started /usr/bin/podman healthcheck run fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9.
Feb 23 10:10:56 np0005626466.localdomain podman[339082]: 2026-02-23 10:10:56.378079333 +0000 UTC m=+0.108041997 container health_status fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9 (image=quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified, name=ceilometer_agent_compute, health_status=healthy, org.label-schema.build-date=20260216, org.label-schema.license=GPLv2, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_build_tag=8419493e1fd846703d277695e03fc5eb, config_id=ceilometer_agent_compute, io.buildah.version=1.43.0, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, config_data={'command': 'kolla_start', 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal', 'EDPM_CONFIG_HASH': 'a07a4436e1d1ca1c6231f11309616a78b1ad9830450b5c2d2fc3fb113cfbf838-fc7962317d10396c157604cb31f0de59b77d21ca5133156f39a4831f188d031c'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute', 'test': '/openstack/healthcheck compute'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified', 'net': 'host', 'restart': 'always', 'security_opt': 'label:type:ceilometer_polling_t', 'user': 'ceilometer', 'volumes': ['/var/lib/openstack/telemetry:/var/lib/kolla/config_files/src:z', '/var/lib/kolla/config_files/ceilometer_agent_compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, 
container_name=ceilometer_agent_compute)
Unit hv_kvp_daemon.service could not be found.
Feb 23 10:10:56 np0005626466.localdomain podman[339082]: 2026-02-23 10:10:56.421153261 +0000 UTC m=+0.151115935 container exec_died fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9 (image=quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified, name=ceilometer_agent_compute, config_data={'command': 'kolla_start',
'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'OS_ENDPOINT_TYPE': 'internal', 'EDPM_CONFIG_HASH': 'a07a4436e1d1ca1c6231f11309616a78b1ad9830450b5c2d2fc3fb113cfbf838-fc7962317d10396c157604cb31f0de59b77d21ca5133156f39a4831f188d031c'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ceilometer_agent_compute', 'test': '/openstack/healthcheck compute'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified', 'net': 'host', 'restart': 'always', 'security_opt': 'label:type:ceilometer_polling_t', 'user': 'ceilometer', 'volumes': ['/var/lib/openstack/telemetry:/var/lib/kolla/config_files/src:z', '/var/lib/kolla/config_files/ceilometer_agent_compute.json:/var/lib/kolla/config_files/config.json:z', '/run/libvirt:/run/libvirt:shared,ro', '/etc/hosts:/etc/hosts:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro', '/var/lib/openstack/cacerts/telemetry/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/dev/log:/dev/log', '/var/lib/openstack/healthchecks/ceilometer_agent_compute:/openstack:ro,z']}, config_id=ceilometer_agent_compute, container_name=ceilometer_agent_compute, io.buildah.version=1.43.0, org.label-schema.build-date=20260216, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, tcib_build_tag=8419493e1fd846703d277695e03fc5eb, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible, tcib_managed=true, org.label-schema.license=GPLv2)
Feb 23 10:10:56 np0005626466.localdomain systemd[1]: fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9.service: Deactivated successfully.

● getty@tty1.service - Getty on tty1
     Loaded: loaded (/usr/lib/systemd/system/getty@.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-23 06:36:09 UTC; 3h 34min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1140 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 100220)
     Memory: 212.0K
        CPU: 7ms
     CGroup: /system.slice/system-getty.slice/getty@tty1.service
             └─1140 /sbin/agetty -o "-p -- \\u" --noclear - linux

Feb 23 06:36:09 np0005626466.novalocal systemd[1]: Started Getty on tty1.

● gssproxy.service - GSSAPI Proxy Daemon
     Loaded: loaded (/usr/lib/systemd/system/gssproxy.service; disabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 06:36:05 UTC; 3h 34min ago
   Main PID: 802 (gssproxy)
         IO: 460.0K read, 0B written
      Tasks: 6 (limit: 100220)
     Memory: 3.6M
        CPU: 26ms
     CGroup: /system.slice/gssproxy.service
             └─802 /usr/sbin/gssproxy -D

Feb 23 06:36:05 np0005626466.novalocal systemd[1]: Starting GSSAPI Proxy Daemon...
Feb 23 06:36:05 np0005626466.novalocal systemd[1]: Started GSSAPI Proxy Daemon.

○ import-state.service - Import network configuration from initramfs
     Loaded: loaded (/usr/lib/systemd/system/import-state.service; enabled; preset: enabled)
     Active: inactive (dead)

○ initrd-cleanup.service - Cleaning Up and Shutting Down Daemons
     Loaded: loaded (/usr/lib/systemd/system/initrd-cleanup.service; static)
     Active: inactive (dead) since Mon 2026-02-23 06:36:01 UTC; 3h 35min ago
   Main PID: 564 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Feb 23 06:36:01 localhost systemd[1]: Starting Cleaning Up and Shutting Down Daemons...
Feb 23 06:36:01 localhost systemd[1]: initrd-cleanup.service: Deactivated successfully.
Feb 23 06:36:01 localhost systemd[1]: Finished Cleaning Up and Shutting Down Daemons.

○ initrd-parse-etc.service - Mountpoints Configured in the Real Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-parse-etc.service; static)
     Active: inactive (dead) since Mon 2026-02-23 06:36:01 UTC; 3h 35min ago
   Main PID: 525 (code=exited, status=0/SUCCESS)
        CPU: 16ms

Feb 23 06:36:01 localhost systemd[1]: Starting Mountpoints Configured in the Real Root...
Feb 23 06:36:01 localhost systemd[1]: initrd-parse-etc.service: Deactivated successfully.
Feb 23 06:36:01 localhost systemd[1]: Finished Mountpoints Configured in the Real Root.

○ initrd-switch-root.service - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.service; static)
     Active: inactive (dead) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
   Main PID: 566 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Feb 23 06:36:01 localhost systemd[1]: Starting Switch Root...

○ initrd-udevadm-cleanup-db.service - Cleanup udev Database
     Loaded: loaded (/usr/lib/systemd/system/initrd-udevadm-cleanup-db.service; static)
     Active: inactive (dead) since Mon 2026-02-23 06:36:01 UTC; 3h 35min ago
   Main PID: 565 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Feb 23 06:36:01 localhost systemd[1]: Starting Cleanup udev Database...
Feb 23 06:36:01 localhost systemd[1]: initrd-udevadm-cleanup-db.service: Deactivated successfully.
Feb 23 06:36:01 localhost systemd[1]: Finished Cleanup udev Database.

○ insights-client-boot.service - Run Insights Client at boot
     Loaded: loaded (/usr/lib/systemd/system/insights-client-boot.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-23 06:36:09 UTC; 3h 34min ago
       Docs: man:insights-client(8)

Feb 23 09:37:46 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/insights-client-boot.service:24: Unit uses MemoryLimit=; please use MemoryMax= instead. Support for MemoryLimit= will be removed soon.
Feb 23 09:42:44 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/insights-client-boot.service:24: Unit uses MemoryLimit=; please use MemoryMax= instead. Support for MemoryLimit= will be removed soon.
Feb 23 09:42:44 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/insights-client-boot.service:24: Unit uses MemoryLimit=; please use MemoryMax= instead. Support for MemoryLimit= will be removed soon.
Feb 23 09:44:16 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/insights-client-boot.service:24: Unit uses MemoryLimit=; please use MemoryMax= instead. Support for MemoryLimit= will be removed soon.
Feb 23 09:44:16 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/insights-client-boot.service:24: Unit uses MemoryLimit=; please use MemoryMax= instead. Support for MemoryLimit= will be removed soon.
Feb 23 09:44:30 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/insights-client-boot.service:24: Unit uses MemoryLimit=; please use MemoryMax= instead. Support for MemoryLimit= will be removed soon.
Feb 23 09:44:30 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/insights-client-boot.service:24: Unit uses MemoryLimit=; please use MemoryMax= instead. Support for MemoryLimit= will be removed soon.
Feb 23 09:49:12 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/insights-client-boot.service:24: Unit uses MemoryLimit=; please use MemoryMax= instead. Support for MemoryLimit= will be removed soon.
Feb 23 09:49:25 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/insights-client-boot.service:24: Unit uses MemoryLimit=; please use MemoryMax= instead. Support for MemoryLimit= will be removed soon.
Feb 23 09:49:25 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/insights-client-boot.service:24: Unit uses MemoryLimit=; please use MemoryMax= instead. Support for MemoryLimit= will be removed soon.

○ ip6tables.service - IPv6 firewall with ip6tables
     Loaded: loaded (/usr/lib/systemd/system/ip6tables.service; disabled; preset: disabled)
     Active: inactive (dead)

○ iptables.service - IPv4 firewall with iptables
     Loaded: loaded (/usr/lib/systemd/system/iptables.service; disabled; preset: disabled)
     Active: inactive (dead)

● irqbalance.service - irqbalance daemon
     Loaded: loaded (/usr/lib/systemd/system/irqbalance.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
       Docs: man:irqbalance(1)
             https://github.com/Irqbalance/irqbalance
   Main PID: 758 (irqbalance)
         IO: 712.0K read, 0B written
      Tasks: 2 (limit: 100220)
     Memory: 3.2M
        CPU: 654ms
     CGroup: /system.slice/irqbalance.service
             └─758 /usr/sbin/irqbalance --foreground

Feb 23 06:36:04 localhost systemd[1]: Started irqbalance daemon.

○ iscsi-init.service - One time configuration for iscsi.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-init.service; disabled; preset: disabled)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-23 09:25:18 UTC; 45min ago

Feb 23 09:25:18 np0005626466.localdomain systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).

○ iscsi-onboot.service - Special handling of early boot iSCSI sessions
     Loaded: loaded (/usr/lib/systemd/system/iscsi-onboot.service; enabled; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsi-shutdown.service - Logout off all iSCSI sessions on shutdown
     Loaded: loaded (/usr/lib/systemd/system/iscsi-shutdown.service; static)
     Active: active (exited) since Mon 2026-02-23 09:25:18 UTC; 45min ago
       Docs: man:iscsid(8)
             man:iscsiadm(8)
   Main PID: 212771 (code=exited, status=0/SUCCESS)
        CPU: 2ms

Feb 23 09:25:18 np0005626466.localdomain systemd[1]: Starting Logout off all iSCSI sessions on shutdown...
Feb 23 09:25:18 np0005626466.localdomain systemd[1]: Finished Logout off all iSCSI sessions on shutdown.

○ iscsi-starter.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-starter.service; enabled; preset: disabled)
     Active: inactive (dead)

○ iscsi.service - Login and scanning of iSCSI devices
     Loaded: loaded (/usr/lib/systemd/system/iscsi.service; indirect; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsid.service - Open-iSCSI
     Loaded: loaded (/usr/lib/systemd/system/iscsid.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 09:25:18 UTC; 45min ago
TriggeredBy: ● iscsid.socket
       Docs: man:iscsid(8)
             man:iscsiuio(8)
             man:iscsiadm(8)
   Main PID: 212770 (iscsid)
     Status: "Ready to process requests"
         IO: 0B read, 0B written
      Tasks: 1 (limit: 100220)
     Memory: 1.8M
        CPU: 11ms
     CGroup: /system.slice/iscsid.service
             └─212770 /usr/sbin/iscsid -f

Feb 23 09:25:18 np0005626466.localdomain systemd[1]: Starting Open-iSCSI...
Feb 23 09:25:18 np0005626466.localdomain iscsid[212770]: iscsid: can't open InitiatorName configuration file /etc/iscsi/initiatorname.iscsi
Feb 23 09:25:18 np0005626466.localdomain iscsid[212770]: iscsid: Warning: InitiatorName file /etc/iscsi/initiatorname.iscsi does not exist or does not contain a properly formatted InitiatorName. If using software iscsi (iscsi_tcp or ib_iser) or partial offload (bnx2i or cxgbi iscsi), you may not be able to log into or discover targets. Please create a file /etc/iscsi/initiatorname.iscsi that contains a sting with the format: InitiatorName=iqn.yyyy-mm.<reversed domain name>[:identifier].
Feb 23 09:25:18 np0005626466.localdomain iscsid[212770]: Example: InitiatorName=iqn.2001-04.com.redhat:fc6.
Feb 23 09:25:18 np0005626466.localdomain iscsid[212770]: If using hardware iscsi like qla4xxx this message can be ignored.
Feb 23 09:25:18 np0005626466.localdomain iscsid[212770]: iscsid: can't open InitiatorAlias configuration file /etc/iscsi/initiatorname.iscsi
Feb 23 09:25:18 np0005626466.localdomain iscsid[212770]: iscsid: can't open iscsid.safe_logout configuration file /etc/iscsi/iscsid.conf
Feb 23 09:25:18 np0005626466.localdomain iscsid[212770]: iscsid: can't open iscsid.ipc_auth_uid configuration file /etc/iscsi/iscsid.conf
Feb 23 09:25:18 np0005626466.localdomain systemd[1]: Started Open-iSCSI.

○ iscsiuio.service - iSCSI UserSpace I/O driver
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ iscsiuio.socket
       Docs: man:iscsiuio(8)

● kdump.service - Crash recovery kernel arming
     Loaded: loaded (/usr/lib/systemd/system/kdump.service; enabled; preset: enabled)
     Active: active (exited) since Mon 2026-02-23 06:36:18 UTC; 3h 34min ago
   Main PID: 1131 (code=exited, status=0/SUCCESS)
        CPU: 13.938s

Feb 23 06:36:16 np0005626466.novalocal dracut[1438]: Stored kernel commandline:
Feb 23 06:36:16 np0005626466.novalocal dracut[1438]: No dracut internal kernel commandline stored in the initramfs
Feb 23 06:36:16 np0005626466.novalocal dracut[1438]: *** Install squash loader ***
Feb 23 06:36:17 np0005626466.novalocal dracut[1438]: *** Squashing the files inside the initramfs ***
Feb 23 06:36:17 np0005626466.novalocal dracut[1438]: *** Squashing the files inside the initramfs done ***
Feb 23 06:36:17 np0005626466.novalocal dracut[1438]: *** Creating image file '/boot/initramfs-5.14.0-284.11.1.el9_2.x86_64kdump.img' ***
Feb 23 06:36:18 np0005626466.novalocal dracut[1438]: *** Creating initramfs image file '/boot/initramfs-5.14.0-284.11.1.el9_2.x86_64kdump.img' done ***
Feb 23 06:36:18 np0005626466.novalocal kdumpctl[1136]: kdump: kexec: loaded kdump kernel
Feb 23 06:36:18 np0005626466.novalocal kdumpctl[1136]: kdump: Starting kdump: [OK]
Feb 23 06:36:18 np0005626466.novalocal systemd[1]: Finished Crash recovery kernel arming.

● kmod-static-nodes.service - Create List of Static Device Nodes
     Loaded: loaded (/usr/lib/systemd/system/kmod-static-nodes.service; static)
     Active: active (exited) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
   Main PID: 612 (code=exited, status=0/SUCCESS)
        CPU: 4ms

Feb 23 06:36:02 localhost systemd[1]: Finished Create List of Static Device Nodes.

● ldconfig.service - Rebuild Dynamic Linker Cache
     Loaded: loaded (/usr/lib/systemd/system/ldconfig.service; static)
     Active: active (exited) since Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
       Docs: man:ldconfig(8)
   Main PID: 716 (code=exited, status=0/SUCCESS)
        CPU: 46ms

Feb 23 06:36:04 localhost systemd[1]: Starting Rebuild Dynamic Linker Cache...
Feb 23 06:36:04 localhost systemd[1]: Finished Rebuild Dynamic Linker Cache.

○ libvirtd.service
     Loaded: masked (Reason: Unit libvirtd.service is masked.)
     Active: inactive (dead)
TriggeredBy: ○ libvirtd-admin.socket
             ○ libvirtd-ro.socket
             ○ libvirtd.socket

Feb 23 09:22:32 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/libvirtd.service:29: Failed to parse service type, ignoring: notify-reload
Feb 23 09:22:32 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/libvirtd.service:29: Failed to parse service type, ignoring: notify-reload
Feb 23 09:22:32 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/libvirtd.service:29: Failed to parse service type, ignoring: notify-reload
Feb 23 09:22:32 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/libvirtd.service:29: Failed to parse service type, ignoring: notify-reload
Feb 23 09:22:33 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/libvirtd.service:29: Failed to parse service type, ignoring: notify-reload
Feb 23 09:22:33 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/libvirtd.service:29: Failed to parse service type, ignoring: notify-reload
Feb 23 09:22:33 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/libvirtd.service:29: Failed to parse service type, ignoring: notify-reload
Feb 23 09:22:33 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/libvirtd.service:29: Failed to parse service type, ignoring: notify-reload
Feb 23 09:22:33 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/libvirtd.service:29: Failed to parse service type, ignoring: notify-reload
Feb 23 09:22:34 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/libvirtd.service:29: Failed to parse service type, ignoring: notify-reload
Unit lvm2-activation-early.service could not be found.

○ loadmodules.service - Load legacy module configuration
     Loaded: loaded (/usr/lib/systemd/system/loadmodules.service; enabled; preset: enabled)
     Active: inactive (dead)

○ logrotate.service - Rotate log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.service; static)
     Active: inactive (dead)
TriggeredBy: ● logrotate.timer
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

○ lvm2-lvmpolld.service - LVM2 poll daemon
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.service; static)
     Active: inactive (dead)
TriggeredBy: ● lvm2-lvmpolld.socket
       Docs: man:lvmpolld(8)
        CPU: 0

● lvm2-monitor.service - Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling
     Loaded: loaded (/usr/lib/systemd/system/lvm2-monitor.service; enabled; preset: enabled)
     Active: active (exited) since Mon 2026-02-23 07:17:16 UTC; 2h 53min ago
       Docs: man:dmeventd(8)
             man:lvcreate(8)
             man:lvchange(8)
             man:vgchange(8)
   Main PID: 24124 (code=exited, status=0/SUCCESS)
        CPU: 32ms

Feb 23 07:17:16 np0005626466.localdomain systemd[1]: Starting Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling...
Feb 23 07:17:16 np0005626466.localdomain systemd[1]: Finished Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling.

○ mdmonitor.service - Software RAID monitoring and management
     Loaded: loaded (/usr/lib/systemd/system/mdmonitor.service; enabled; preset: enabled)
     Active: inactive (dead)

○ microcode.service - Load CPU microcode update
     Loaded: loaded (/usr/lib/systemd/system/microcode.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-23 06:36:04 UTC; 3h 34min ago

Feb 23 06:36:04 localhost systemd[1]: Load CPU microcode update was skipped because of an unmet condition check (ConditionPathExists=/sys/devices/system/cpu/microcode/reload).

○ modprobe@configfs.service - Load Kernel Module configfs
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Mon 2026-02-23 06:36:03 UTC; 3h 35min ago
       Docs: man:modprobe(8)
   Main PID: 644 (code=exited, status=0/SUCCESS)
        CPU: 6ms

Feb 23 06:36:03 localhost systemd[1]: Starting Load Kernel Module configfs...
Feb 23 06:36:03 localhost systemd[1]: modprobe@configfs.service: Deactivated successfully.
Feb 23 06:36:03 localhost systemd[1]: Finished Load Kernel Module configfs.

○ modprobe@drm.service - Load Kernel Module drm
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
       Docs: man:modprobe(8)
   Main PID: 614 (code=exited, status=0/SUCCESS)
        CPU: 101ms

Feb 23 06:36:02 localhost systemd[1]: modprobe@drm.service: Deactivated successfully.
Feb 23 06:36:02 localhost systemd[1]: Finished Load Kernel Module drm.

○ modprobe@fuse.service - Load Kernel Module fuse
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
       Docs: man:modprobe(8)
   Main PID: 615 (code=exited, status=0/SUCCESS)
        CPU: 70ms

Feb 23 06:36:02 localhost systemd[1]: modprobe@fuse.service: Deactivated successfully.
Feb 23 06:36:02 localhost systemd[1]: Finished Load Kernel Module fuse.

● multipathd.service - Device-Mapper Multipath Device Controller
     Loaded: loaded (/usr/lib/systemd/system/multipathd.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-23 09:26:03 UTC; 45min ago
TriggeredBy: ● multipathd.socket
   Main PID: 218083 (multipathd)
     Status: "up"
         IO: 0B read, 0B written
      Tasks: 8
     Memory: 18.5M
        CPU: 261ms
     CGroup: /system.slice/multipathd.service
             └─218083 /sbin/multipathd -d -s

Feb 23 09:26:03 np0005626466.localdomain systemd[1]: Starting Device-Mapper Multipath Device Controller...
Feb 23 09:26:03 np0005626466.localdomain multipathd[218083]: --------start up--------
Feb 23 09:26:03 np0005626466.localdomain multipathd[218083]: read /etc/multipath.conf
Feb 23 09:26:03 np0005626466.localdomain multipathd[218083]: path checkers start up
Feb 23 09:26:03 np0005626466.localdomain systemd[1]: Started Device-Mapper Multipath Device Controller.

○ netns-placeholder.service - Create netns directory
     Loaded: loaded (/etc/systemd/system/netns-placeholder.service; enabled; preset: enabled)
     Active: inactive (dead) since Mon 2026-02-23 09:35:06 UTC; 35min ago
   Main PID: 261524 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Feb 23 09:35:06 np0005626466.localdomain systemd[1]: Starting Create netns directory...
Feb 23 09:35:06 np0005626466.localdomain systemd[1]: netns-placeholder.service: Deactivated successfully.
Feb 23 09:35:06 np0005626466.localdomain systemd[1]: Finished Create netns directory.

● network.service - LSB: Bring up/down networking
     Loaded: loaded (/etc/rc.d/init.d/network; generated)
     Active: active (exited) since Mon 2026-02-23 07:14:30 UTC; 2h 56min ago
       Docs: man:systemd-sysv-generator(8)
        CPU: 472ms

Feb 23 07:14:30 np0005626466.novalocal systemd[1]: Starting LSB: Bring up/down networking...
Feb 23 07:14:30 np0005626466.novalocal network[22210]: WARN      : [network] You are using 'network' service provided by 'network-scripts', which are now deprecated.
Feb 23 07:14:30 np0005626466.novalocal network[22221]: You are using 'network' service provided by 'network-scripts', which are now deprecated.
Feb 23 07:14:30 np0005626466.novalocal network[22210]: WARN      : [network] 'network-scripts' will be removed from distribution in near future.
Feb 23 07:14:30 np0005626466.novalocal network[22222]: 'network-scripts' will be removed from distribution in near future.
Feb 23 07:14:30 np0005626466.novalocal network[22210]: WARN      : [network] It is advised to switch to 'NetworkManager' instead for network management.
Feb 23 07:14:30 np0005626466.novalocal network[22223]: It is advised to switch to 'NetworkManager' instead for network management.
Feb 23 07:14:30 np0005626466.novalocal network[22210]: Bringing up loopback interface:  [  OK  ]
Feb 23 07:14:30 np0005626466.novalocal network[22210]: Bringing up interface eth0:  [  OK  ]
Feb 23 07:14:30 np0005626466.novalocal systemd[1]: Started LSB: Bring up/down networking.

● NetworkManager-wait-online.service - Network Manager Wait Online
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager-wait-online.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-23 06:43:59 UTC; 3h 27min ago
       Docs: man:nm-online(1)
   Main PID: 5994 (code=exited, status=0/SUCCESS)
        CPU: 60ms

Feb 23 06:43:14 np0005626466.novalocal systemd[1]: Starting Network Manager Wait Online...
Feb 23 06:43:59 np0005626466.novalocal systemd[1]: Finished Network Manager Wait Online.

● NetworkManager.service - Network Manager
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager.service; enabled; preset: enabled)
    Drop-In: /usr/lib/systemd/system/NetworkManager.service.d
             └─NetworkManager-ovs.conf
     Active: active (running) since Mon 2026-02-23 06:43:14 UTC; 3h 27min ago
       Docs: man:NetworkManager(8)
   Main PID: 5981 (NetworkManager)
         IO: 512.0K read, 139.5K written
      Tasks: 3 (limit: 100220)
     Memory: 6.5M
        CPU: 1min 387ms
     CGroup: /system.slice/NetworkManager.service
             └─5981 /usr/sbin/NetworkManager --no-daemon

Feb 23 09:59:30 np0005626466.localdomain NetworkManager[5981]: <info>  [1771840770.4134] device (tapff7aa220-50): carrier: link connected
Feb 23 09:59:31 np0005626466.localdomain NetworkManager[5981]: <info>  [1771840771.4314] manager: (tap917e4638-98): new Generic device (/org/freedesktop/NetworkManager/Devices/53)
Feb 23 09:59:36 np0005626466.localdomain NetworkManager[5981]: <info>  [1771840776.3432] manager: (tap6341935b-53): new Generic device (/org/freedesktop/NetworkManager/Devices/54)
Unit ntpd.service could not be found.
Unit ntpdate.service could not be found.
Feb 23 09:59:39 np0005626466.localdomain NetworkManager[5981]: <info>  [1771840779.1212] manager: (tap2a9ff60a-d3): new Generic device (/org/freedesktop/NetworkManager/Devices/55)
Feb 23 09:59:52 np0005626466.localdomain NetworkManager[5981]: <info>  [1771840792.1447] manager: (tap62bb136d-44): new Generic device (/org/freedesktop/NetworkManager/Devices/56)
Feb 23 10:00:17 np0005626466.localdomain NetworkManager[5981]: <info>  [1771840817.8295] device (tap76b4fad5-6b): state change: disconnected -> unmanaged (reason 'unmanaged', sys-iface-state: 'removed')
Feb 23 10:00:19 np0005626466.localdomain NetworkManager[5981]: <info>  [1771840819.0019] manager: (tap81a83569-e4): new Generic device (/org/freedesktop/NetworkManager/Devices/57)
Feb 23 10:00:29 np0005626466.localdomain NetworkManager[5981]: <info>  [1771840829.2051] manager: (tap305e3026-36): new Generic device (/org/freedesktop/NetworkManager/Devices/58)
Feb 23 10:00:53 np0005626466.localdomain NetworkManager[5981]: <info>  [1771840853.2300] manager: (tap51c10725-ff): new Generic device (/org/freedesktop/NetworkManager/Devices/59)
Feb 23 10:02:41 np0005626466.localdomain NetworkManager[5981]: <info>  [1771840961.7343] manager: (tap959298c0-28): new Generic device (/org/freedesktop/NetworkManager/Devices/60)

○ neutron-cleanup.service - Neutron cleanup on startup
     Loaded: loaded (/usr/lib/systemd/system/neutron-cleanup.service; enabled; preset: disabled)
     Active: inactive (dead)

○ nfs-idmapd.service - NFSv4 ID-name mapping service
     Loaded: loaded (/usr/lib/systemd/system/nfs-idmapd.service; static)
     Active: inactive (dead)

○ nfs-mountd.service - NFS Mount Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfs-mountd.service; static)
     Active: inactive (dead)

○ nfs-server.service - NFS server and services
     Loaded: loaded (/usr/lib/systemd/system/nfs-server.service; disabled; preset: disabled)
     Active: inactive (dead)

○ nfs-utils.service - NFS server and client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-utils.service; static)
     Active: inactive (dead)

○ nfsdcld.service - NFSv4 Client Tracking Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfsdcld.service; static)
     Active: inactive (dead)

● nftables.service - Netfilter Tables
     Loaded: loaded (/usr/lib/systemd/system/nftables.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-23 09:42:33 UTC; 28min ago
       Docs: man:nft(8)
   Main PID: 284642 (code=exited, status=0/SUCCESS)
        CPU: 40ms

Feb 23 09:42:33 np0005626466.localdomain systemd[1]: Starting Netfilter Tables...
Feb 23 09:42:33 np0005626466.localdomain systemd[1]: Finished Netfilter Tables.

● nis-domainname.service - Read and set NIS domainname from /etc/sysconfig/network
     Loaded: loaded (/usr/lib/systemd/system/nis-domainname.service; enabled; preset: enabled)
     Active: active (exited) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
   Main PID: 616 (code=exited, status=0/SUCCESS)
        CPU: 5ms

Feb 23 06:36:02 localhost systemd[1]: Finished Read and set NIS domainname from /etc/sysconfig/network.

○ nvmefc-boot-connections.service - Auto-connect to subsystems on FC-NVME devices found during boot
     Loaded: loaded (/usr/lib/systemd/system/nvmefc-boot-connections.service; enabled; preset: enabled)
     Active: inactive (dead)

● openvswitch.service - Open vSwitch
     Loaded: loaded (/usr/lib/systemd/system/openvswitch.service; enabled; preset: disabled)
     Active: active (exited) since Mon 2026-02-23 07:14:31 UTC; 2h 56min ago
   Main PID: 22528 (code=exited, status=0/SUCCESS)
        CPU: 2ms

Feb 23 07:14:31 np0005626466.novalocal systemd[1]: Starting Open vSwitch...
Feb 23 07:14:31 np0005626466.novalocal systemd[1]: Finished Open vSwitch.

● ovs-delete-transient-ports.service - Open vSwitch Delete Transient Ports
     Loaded: loaded (/usr/lib/systemd/system/ovs-delete-transient-ports.service; static)
     Active: active (exited) since Mon 2026-02-23 07:14:31 UTC; 2h 56min ago
   Main PID: 22465 (code=exited, status=0/SUCCESS)
Unit pacemaker.service could not be found.
Unit plymouth-quit-wait.service could not be found.
Unit plymouth-start.service could not be found.
        CPU: 24ms

Feb 23 07:14:31 np0005626466.novalocal systemd[1]: Starting Open vSwitch Delete Transient Ports...
Feb 23 07:14:31 np0005626466.novalocal systemd[1]: Finished Open vSwitch Delete Transient Ports.

● ovs-vswitchd.service - Open vSwitch Forwarding Unit
     Loaded: loaded (/usr/lib/systemd/system/ovs-vswitchd.service; static)
     Active: active (running) since Mon 2026-02-23 07:14:31 UTC; 2h 56min ago
   Main PID: 22519 (ovs-vswitchd)
         IO: 1.2M read, 388.0K written
      Tasks: 14 (limit: 100220)
     Memory: 245.5M
        CPU: 40.742s
     CGroup: /system.slice/ovs-vswitchd.service
             └─22519 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach

Feb 23 07:14:31 np0005626466.novalocal systemd[1]: Starting Open vSwitch Forwarding Unit...
Feb 23 07:14:31 np0005626466.novalocal ovs-ctl[22509]: Inserting openvswitch module [  OK  ]
Feb 23 07:14:31 np0005626466.novalocal ovs-ctl[22477]: Starting ovs-vswitchd [  OK  ]
Feb 23 07:14:31 np0005626466.novalocal ovs-vsctl[22527]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=np0005626466.novalocal
Feb 23 07:14:31 np0005626466.novalocal ovs-ctl[22477]: Enabling remote OVSDB managers [  OK  ]
Feb 23 07:14:31 np0005626466.novalocal systemd[1]: Started Open vSwitch Forwarding Unit.

● ovsdb-server.service - Open vSwitch Database Unit
     Loaded: loaded (/usr/lib/systemd/system/ovsdb-server.service; static)
     Active: active (running) since Mon 2026-02-23 07:14:31 UTC; 2h 56min ago
   Main PID: 22437 (ovsdb-server)
         IO: 864.0K read, 777.0K written
      Tasks: 1 (limit: 100220)
     Memory: 4.1M
        CPU: 2min 49.170s
     CGroup: /system.slice/ovsdb-server.service
             └─22437 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach

Feb 23 07:14:31 np0005626466.novalocal ovs-ctl[22389]: /etc/openvswitch/conf.db does not exist ... (warning).
Feb 23 07:14:31 np0005626466.novalocal ovs-ctl[22389]: Creating empty database /etc/openvswitch/conf.db [  OK  ]
Feb 23 07:14:31 np0005626466.novalocal ovs-ctl[22389]: Starting ovsdb-server [  OK  ]
Feb 23 07:14:31 np0005626466.novalocal ovs-vsctl[22438]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait -- init -- set Open_vSwitch . db-version=8.5.1
Feb 23 07:14:31 np0005626466.novalocal ovs-vsctl[22458]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait set Open_vSwitch . ovs-version=3.3.6-141.el9fdp "external-ids:system-id=\"0983c4cd-1476-49af-89e0-3187e18b9de6\"" "external-ids:rundir=\"/var/run/openvswitch\"" "system-type=\"rhel\"" "system-version=\"9.2\""
Feb 23 07:14:31 np0005626466.novalocal ovs-ctl[22389]: Configuring Open vSwitch system IDs [  OK  ]
Feb 23 07:14:31 np0005626466.novalocal ovs-ctl[22389]: Enabling remote OVSDB managers [  OK  ]
Feb 23 07:14:31 np0005626466.novalocal systemd[1]: Started Open vSwitch Database Unit.
Feb 23 07:14:31 np0005626466.novalocal ovs-vsctl[22464]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=np0005626466.novalocal
Feb 23 08:54:19 np0005626466.localdomain ovsdb-server[22437]: ovs|00012|reconnect|ERR|tcp:127.0.0.1:38744: no response to inactivity probe after 5.04 seconds, disconnecting

● podman.service - Podman API Service
     Loaded: loaded (/usr/lib/systemd/system/podman.service; disabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 09:29:34 UTC; 41min ago
TriggeredBy: ● podman.socket
       Docs: man:podman-system-service(1)
   Main PID: 241175 (podman)
         IO: 256.0K read, 4.7M written
      Tasks: 16 (limit: 100220)
     Memory: 230.9M
        CPU: 5min 47.505s
     CGroup: /system.slice/podman.service
             └─241175 /usr/bin/podman --log-level=info system service

Feb 23 10:09:20 np0005626466.localdomain podman[241175]: @ - - [23/Feb/2026:10:09:20 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 18793 "" "Go-http-client/1.1"
Feb 23 10:09:50 np0005626466.localdomain podman[241175]: time="2026-02-23T10:09:50Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Feb 23 10:09:50 np0005626466.localdomain podman[241175]: @ - - [23/Feb/2026:10:09:50 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 157717 "" "Go-http-client/1.1"
Feb 23 10:09:50 np0005626466.localdomain podman[241175]: @ - - [23/Feb/2026:10:09:50 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 18790 "" "Go-http-client/1.1"
Feb 23 10:10:20 np0005626466.localdomain podman[241175]: time="2026-02-23T10:10:20Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Feb 23 10:10:20 np0005626466.localdomain podman[241175]: @ - - [23/Feb/2026:10:10:20 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 157717 "" "Go-http-client/1.1"
Feb 23 10:10:20 np0005626466.localdomain podman[241175]: @ - - [23/Feb/2026:10:10:20 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 18793 "" "Go-http-client/1.1"
Feb 23 10:10:50 np0005626466.localdomain podman[241175]: time="2026-02-23T10:10:50Z" level=info msg="List containers: received `last` parameter - overwriting `limit`"
Feb 23 10:10:50 np0005626466.localdomain podman[241175]: @ - - [23/Feb/2026:10:10:50 +0000] "GET /v4.9.3/libpod/containers/json?all=true&external=false&last=0&namespace=false&size=false&sync=false HTTP/1.1" 200 157717 "" "Go-http-client/1.1"
Feb 23 10:10:50 np0005626466.localdomain podman[241175]: @ - - [23/Feb/2026:10:10:50 +0000] "GET /v4.9.3/libpod/containers/stats?all=false&interval=1&stream=false HTTP/1.1" 200 18810 "" "Go-http-client/1.1"

● polkit.service - Authorization Manager
     Loaded: loaded (/usr/lib/systemd/system/polkit.service; static)
     Active: active (running) since Mon 2026-02-23 06:36:06 UTC; 3h 34min ago
       Docs: man:polkit(8)
   Main PID: 1036 (polkitd)
         IO: 12.5M read, 0B written
      Tasks: 12 (limit: 100220)
     Memory: 20.0M
        CPU: 2.712s
     CGroup: /system.slice/polkit.service
             └─1036 /usr/lib/polkit-1/polkitd --no-debug

Feb 23 09:21:50 np0005626466.localdomain polkitd[1036]: Collecting garbage unconditionally...
Feb 23 09:21:50 np0005626466.localdomain polkitd[1036]: Loading rules from directory /etc/polkit-1/rules.d
Feb 23 09:21:50 np0005626466.localdomain polkitd[1036]: Loading rules from directory /usr/share/polkit-1/rules.d
Feb 23 09:21:50 np0005626466.localdomain polkitd[1036]: Finished loading, compiling and executing 5 rules
Feb 23 09:24:15 np0005626466.localdomain polkitd[1036]: Registered Authentication Agent for unix-process:206726:1009647 (system bus name :1.2855 [pkttyagent --process 206726 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale C.UTF-8)
Feb 23 09:24:15 np0005626466.localdomain polkitd[1036]: Unregistered Authentication Agent for unix-process:206726:1009647 (system bus name :1.2855, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale C.UTF-8) (disconnected from bus)
Feb 23 09:24:15 np0005626466.localdomain polkitd[1036]: Registered Authentication Agent for unix-process:206725:1009647 (system bus name :1.2856 [pkttyagent --process 206725 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale C.UTF-8)
Feb 23 09:24:15 np0005626466.localdomain polkitd[1036]: Unregistered Authentication Agent for unix-process:206725:1009647 (system bus name :1.2856, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale C.UTF-8) (disconnected from bus)
Unit power-profiles-daemon.service could not be found.
Feb 23 09:24:17 np0005626466.localdomain polkitd[1036]: Registered Authentication Agent for unix-process:207067:1009863 (system bus name :1.2859 [pkttyagent --process 207067 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale C.UTF-8)
Feb 23 09:24:17 np0005626466.localdomain polkitd[1036]: Unregistered Authentication Agent for unix-process:207067:1009863 (system bus name :1.2859, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale C.UTF-8) (disconnected from bus)

○ rbdmap.service - Map RBD devices
     Loaded: loaded (/usr/lib/systemd/system/rbdmap.service; disabled; preset: disabled)
     Active: inactive (dead)

○ rc-local.service - /etc/rc.d/rc.local Compatibility
     Loaded: loaded (/usr/lib/systemd/system/rc-local.service; static)
     Active: inactive (dead)
       Docs: man:systemd-rc-local-generator(8)

○ rescue.service - Rescue Shell
     Loaded: loaded (/usr/lib/systemd/system/rescue.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

● rhsm.service - RHSM dbus service
     Loaded: loaded (/usr/lib/systemd/system/rhsm.service; disabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 06:52:37 UTC; 3h 18min ago
   Main PID: 6643 (rhsm-service)
         IO: 296.0K read, 4.3M written
      Tasks: 2 (limit: 100220)
     Memory: 77.8M
        CPU: 4.455s
     CGroup: /system.slice/rhsm.service
             └─6643 /usr/bin/python3 /usr/libexec/rhsm-service

Feb 23 07:12:06 np0005626466.novalocal rhsm-service[6643]: WARNING [subscription_manager.cert_sorter:194] Installed product 479 not present in response from server.
Feb 23 07:12:06 np0005626466.novalocal rhsm-service[6643]: WARNING [subscription_manager.cert_sorter:194] Installed product 479 not present in response from server.
Feb 23 07:12:30 np0005626466.novalocal rhsm-service[6643]: WARNING [subscription_manager.cert_sorter:194] Installed product 479 not present in response from server.
Feb 23 07:12:35 np0005626466.novalocal rhsm-service[6643]: WARNING [subscription_manager.cert_sorter:194] Installed product 479 not present in response from server.
Feb 23 07:12:35 np0005626466.novalocal rhsm-service[6643]: WARNING [subscription_manager.cert_sorter:194] Installed product 479 not present in response from server.
Feb 23 07:13:38 np0005626466.novalocal rhsm-service[6643]: WARNING [subscription_manager.cert_sorter:194] Installed product 479 not present in response from server.
Feb 23 07:13:38 np0005626466.novalocal rhsm-service[6643]: WARNING [subscription_manager.cert_sorter:194] Installed product 479 not present in response from server.
Feb 23 08:19:48 np0005626466.localdomain rhsm-service[6643]: WARNING [subscription_manager.cert_sorter:194] Installed product 479 not present in response from server.
Feb 23 08:19:48 np0005626466.localdomain rhsm-service[6643]: WARNING [subscription_manager.cert_sorter:194] Installed product 479 not present in response from server.
Feb 23 08:20:44 np0005626466.localdomain rhsm-service[6643]: WARNING [subscription_manager.cert_sorter:194] Installed product 479 not present in response from server.

● rhsmcertd.service - Enable periodic update of entitlement certificates.
     Loaded: loaded (/usr/lib/systemd/system/rhsmcertd.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-23 06:36:05 UTC; 3h 34min ago
   Main PID: 795 (rhsmcertd)
         IO: 52.0K read, 4.0K written
      Tasks: 2 (limit: 100220)
     Memory: 2.3M
        CPU: 6ms
     CGroup: /system.slice/rhsmcertd.service
             └─795 /usr/bin/rhsmcertd

Feb 23 06:36:05 np0005626466.novalocal systemd[1]: Starting Enable periodic update of entitlement certificates....
Feb 23 06:36:05 np0005626466.novalocal systemd[1]: Started Enable periodic update of entitlement certificates..

○ rpc-gssd.service - RPC security service for NFS client and server
     Loaded: loaded (/usr/lib/systemd/system/rpc-gssd.service; static)
Unit rpc-svcgssd.service could not be found.
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-23 06:36:05 UTC; 3h 34min ago

Feb 23 06:36:05 np0005626466.novalocal systemd[1]: RPC security service for NFS client and server was skipped because of an unmet condition check (ConditionPathExists=/etc/krb5.keytab).

● rpc-statd-notify.service - Notify NFS peers of a restart
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd-notify.service; static)
     Active: active (exited) since Mon 2026-02-23 06:36:09 UTC; 3h 34min ago
        CPU: 6ms

Feb 23 06:36:09 np0005626466.novalocal sm-notify[1132]: Version 2.5.4 starting
Feb 23 06:36:09 np0005626466.novalocal systemd[1]: Starting Notify NFS peers of a restart...
Feb 23 06:36:09 np0005626466.novalocal systemd[1]: Started Notify NFS peers of a restart.

○ rpc-statd.service - NFS status monitor for NFSv2/3 locking.
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd.service; static)
     Active: inactive (dead)

● rpcbind.service - RPC Bind
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
TriggeredBy: ● rpcbind.socket
       Docs: man:rpcbind(8)
   Main PID: 724 (rpcbind)
         IO: 2.7M read, 0B written
      Tasks: 1 (limit: 100220)
     Memory: 3.7M
        CPU: 57ms
     CGroup: /system.slice/rpcbind.service
             └─724 /usr/bin/rpcbind -w -f

Feb 23 06:36:04 localhost systemd[1]: Starting RPC Bind...
Feb 23 06:36:04 localhost systemd[1]: Started RPC Bind.

● rsyslog.service - System Logging Service
     Loaded: loaded (/usr/lib/systemd/system/rsyslog.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
       Docs: man:rsyslogd(8)
             https://www.rsyslog.com/doc/
   Main PID: 759 (rsyslogd)
         IO: 920.0K read, 35.3M written
      Tasks: 3 (limit: 100220)
     Memory: 27.1M
        CPU: 16.293s
     CGroup: /system.slice/rsyslog.service
             └─759 /usr/sbin/rsyslogd -n

Feb 23 09:36:59 np0005626466.localdomain rsyslogd[759]: imjournal: journal files changed, reloading...  [v8.2102.0-111.el9 try https://www.rsyslog.com/e/0 ]
Feb 23 09:38:59 np0005626466.localdomain rsyslogd[759]: imjournal: journal files changed, reloading...  [v8.2102.0-111.el9 try https://www.rsyslog.com/e/0 ]
Feb 23 09:38:59 np0005626466.localdomain rsyslogd[759]: imjournal: journal files changed, reloading...  [v8.2102.0-111.el9 try https://www.rsyslog.com/e/0 ]
Feb 23 09:39:08 np0005626466.localdomain rsyslogd[759]: imjournal from <localhost:nova_compute>: begin to drop messages due to rate-limiting
Feb 23 09:39:18 np0005626466.localdomain rsyslogd[759]: imjournal: 2426 messages lost due to rate-limiting (20000 allowed within 600 seconds)
Feb 23 09:42:32 np0005626466.localdomain rsyslogd[759]: imjournal: journal files changed, reloading...  [v8.2102.0-111.el9 try https://www.rsyslog.com/e/0 ]
Feb 23 09:42:32 np0005626466.localdomain rsyslogd[759]: imjournal: journal files changed, reloading...  [v8.2102.0-111.el9 try https://www.rsyslog.com/e/0 ]
Feb 23 10:09:23 np0005626466.localdomain rsyslogd[759]: imjournal: journal files changed, reloading...  [v8.2102.0-111.el9 try https://www.rsyslog.com/e/0 ]
Feb 23 10:09:23 np0005626466.localdomain rsyslogd[759]: imjournal: journal files changed, reloading...  [v8.2102.0-111.el9 try https://www.rsyslog.com/e/0 ]
Feb 23 10:10:39 np0005626466.localdomain rsyslogd[759]: imjournal from <localhost:ceph-osd>: begin to drop messages due to rate-limiting

○ selinux-autorelabel-mark.service - Mark the need to relabel after reboot
     Loaded: loaded (/usr/lib/systemd/system/selinux-autorelabel-mark.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-23 06:36:04 UTC; 3h 34min ago

Feb 23 06:36:04 localhost systemd[1]: Mark the need to relabel after reboot was skipped because of an unmet condition check (ConditionSecurity=!selinux).

● serial-getty@ttyS0.service - Serial Getty on ttyS0
Unit snapd.seeded.service could not be found.
Unit sntp.service could not be found.
Unit sshd-keygen.service could not be found.
     Loaded: loaded (/usr/lib/systemd/system/serial-getty@.service; enabled-runtime; preset: disabled)
     Active: active (running) since Mon 2026-02-23 06:36:09 UTC; 3h 34min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1142 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 100220)
     Memory: 280.0K
        CPU: 11ms
     CGroup: /system.slice/system-serial\x2dgetty.slice/serial-getty@ttyS0.service
             └─1142 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

Feb 23 06:36:09 np0005626466.novalocal systemd[1]: Started Serial Getty on ttyS0.

● snmpd.service - Simple Network Management Protocol (SNMP) Daemon.
     Loaded: loaded (/usr/lib/systemd/system/snmpd.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 08:09:21 UTC; 2h 1min ago
   Main PID: 67626 (snmpd)
         IO: 0B read, 145.5K written
      Tasks: 1 (limit: 100220)
     Memory: 5.3M
        CPU: 7.331s
     CGroup: /system.slice/snmpd.service
             └─67626 /usr/sbin/snmpd -LS0-5d -f

Feb 23 08:10:21 np0005626466.localdomain snmpd[67626]: empty variable list in _query
Feb 23 08:10:21 np0005626466.localdomain snmpd[67626]: empty variable list in _query
Feb 23 09:55:21 np0005626466.localdomain snmpd[67626]: empty variable list in _query
Feb 23 09:55:21 np0005626466.localdomain snmpd[67626]: empty variable list in _query
Feb 23 09:55:21 np0005626466.localdomain snmpd[67626]: empty variable list in _query
Feb 23 09:56:21 np0005626466.localdomain snmpd[67626]: empty variable list in _query
Feb 23 09:56:21 np0005626466.localdomain snmpd[67626]: empty variable list in _query
Feb 23 09:56:21 np0005626466.localdomain snmpd[67626]: empty variable list in _query
Feb 23 10:00:21 np0005626466.localdomain snmpd[67626]: empty variable list in _query
Feb 23 10:00:21 np0005626466.localdomain snmpd[67626]: empty variable list in _query

○ sshd-keygen@ecdsa.service - OpenSSH ecdsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /etc/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-23 09:22:32 UTC; 48min ago

Feb 23 06:36:04 localhost systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Feb 23 07:48:52 np0005626466.localdomain systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Feb 23 09:08:37 np0005626466.localdomain systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Feb 23 09:08:38 np0005626466.localdomain systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Feb 23 09:22:32 np0005626466.localdomain systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@ed25519.service - OpenSSH ed25519 Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /etc/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-23 09:22:32 UTC; 48min ago

Feb 23 06:36:04 localhost systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Feb 23 07:48:52 np0005626466.localdomain systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Feb 23 09:08:37 np0005626466.localdomain systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Feb 23 09:08:38 np0005626466.localdomain systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Feb 23 09:22:32 np0005626466.localdomain systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@rsa.service - OpenSSH rsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /etc/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-23 09:22:32 UTC; 48min ago

Feb 23 06:36:04 localhost systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Feb 23 07:48:52 np0005626466.localdomain systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Feb 23 09:08:37 np0005626466.localdomain systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Feb 23 09:08:38 np0005626466.localdomain systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Feb 23 09:22:32 np0005626466.localdomain systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

● sshd.service - OpenSSH server daemon
     Loaded: loaded (/usr/lib/systemd/system/sshd.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-23 09:22:32 UTC; 48min ago
       Docs: man:sshd(8)
             man:sshd_config(5)
   Main PID: 186135 (sshd)
         IO: 276.0K read, 96.0K written
      Tasks: 1 (limit: 100220)
     Memory: 5.5M
        CPU: 7.542s
     CGroup: /system.slice/sshd.service
             └─186135 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"

Feb 23 10:10:20 np0005626466.localdomain sshd[331435]: main: sshd: ssh-rsa algorithm is disabled
Feb 23 10:10:20 np0005626466.localdomain sshd[331435]: Accepted publickey for zuul from 192.168.122.10 port 60584 ssh2: RSA SHA256:/ShS2J5Dq7o9P59e/NmgQORSAcJOBwu46Huo03HBdB4
Feb 23 10:10:20 np0005626466.localdomain sshd[331435]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by (uid=0)
Feb 23 10:10:31 np0005626466.localdomain sshd[332451]: main: sshd: ssh-rsa algorithm is disabled
Feb 23 10:10:31 np0005626466.localdomain sshd[332451]: Invalid user sol from 193.32.162.146 port 34320
Feb 23 10:10:32 np0005626466.localdomain sshd[332451]: Connection closed by invalid user sol 193.32.162.146 port 34320 [preauth]
Feb 23 10:10:32 np0005626466.localdomain sshd[332520]: main: sshd: ssh-rsa algorithm is disabled
Feb 23 10:10:32 np0005626466.localdomain sshd[332520]: fatal: mm_answer_sign: sign: error in libcrypto
Feb 23 10:10:50 np0005626466.localdomain sshd[337783]: main: sshd: ssh-rsa algorithm is disabled
Feb 23 10:10:51 np0005626466.localdomain sshd[337783]: fatal: mm_answer_sign: sign: error in libcrypto
Unit syslog.service could not be found.

○ sssd-kcm.service - SSSD Kerberos Cache Manager
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.service; indirect; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ● sssd-kcm.socket
       Docs: man:sssd-kcm(5)

○ sssd.service - System Security Services Daemon
     Loaded: loaded (/usr/lib/systemd/system/sssd.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-23 06:36:04 UTC; 3h 34min ago

Feb 23 06:36:04 localhost systemd[1]: System Security Services Daemon was skipped because no trigger condition checks were met.

○ sysstat-collect.service - system activity accounting tool
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-collect.timer
       Docs: man:sa1(8)

○ sysstat-summary.service - Generate a daily summary of process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-summary.timer
       Docs: man:sa2(8)

○ sysstat.service - Resets System Activity Logs
     Loaded: loaded (/usr/lib/systemd/system/sysstat.service; enabled; preset: enabled)
     Active: inactive (dead)

○ systemd-ask-password-console.service - Dispatch Password Requests to Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-console.path
       Docs: man:systemd-ask-password-console.service(8)

○ systemd-ask-password-wall.service - Forward Password Requests to Wall
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-wall.path
       Docs: man:systemd-ask-password-wall.service(8)

○ systemd-binfmt.service - Set Up Additional Binary Formats
     Loaded: loaded (/usr/lib/systemd/system/systemd-binfmt.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
       Docs: man:systemd-binfmt.service(8)
             man:binfmt.d(5)
             https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Feb 23 06:36:04 localhost systemd[1]: Set Up Additional Binary Formats was skipped because no trigger condition checks were met.

○ systemd-boot-system-token.service - Store a System Token in an EFI Variable
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-system-token.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
       Docs: man:systemd-boot-system-token.service(8)

Feb 23 06:36:04 localhost systemd[1]: Store a System Token in an EFI Variable was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/LoaderFeatures-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

● systemd-boot-update.service - Automatic Boot Loader Update
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-update.service; enabled; preset: enabled)
     Active: active (exited) since Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
       Docs: man:bootctl(1)
   Main PID: 717 (code=exited, status=0/SUCCESS)
        CPU: 18ms

Feb 23 06:36:04 localhost systemd[1]: Starting Automatic Boot Loader Update...
Feb 23 06:36:04 localhost systemd[1]: Finished Automatic Boot Loader Update.

○ systemd-firstboot.service - First Boot Wizard
     Loaded: loaded (/usr/lib/systemd/system/systemd-firstboot.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
       Docs: man:systemd-firstboot(1)

Feb 23 06:36:02 localhost systemd[1]: First Boot Wizard was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ systemd-fsck-root.service - File System Check on Root Device
     Loaded: loaded (/usr/lib/systemd/system/systemd-fsck-root.service; static)
     Active: inactive (dead) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
   Duration: 1.533s
       Docs: man:systemd-fsck-root.service(8)
   Main PID: 509 (code=exited, status=0/SUCCESS)
        CPU: 16ms

Feb 23 06:36:01 localhost systemd[1]: Starting File System Check on /dev/disk/by-uuid/a3dd82de-ffc6-4652-88b9-80e003b8f20a...
Feb 23 06:36:01 localhost systemd-fsck[511]: /usr/sbin/fsck.xfs: XFS file system.
Feb 23 06:36:01 localhost systemd[1]: Finished File System Check on /dev/disk/by-uuid/a3dd82de-ffc6-4652-88b9-80e003b8f20a.

● systemd-fsck@dev-disk-by\x2duuid-7B77\x2d95E7.service - File System Check on /dev/disk/by-uuid/7B77-95E7
     Loaded: loaded (/usr/lib/systemd/system/systemd-fsck@.service; static)
     Active: active (exited) since Mon 2026-02-23 06:36:03 UTC; 3h 35min ago
       Docs: man:systemd-fsck@.service(8)
   Main PID: 674 (code=exited, status=0/SUCCESS)
        CPU: 20ms

Feb 23 06:36:03 localhost systemd[1]: Starting File System Check on /dev/disk/by-uuid/7B77-95E7...
Feb 23 06:36:03 localhost systemd-fsck[679]: fsck.fat 4.2 (2021-01-31)
Feb 23 06:36:03 localhost systemd-fsck[679]: /dev/vda2: 12 files, 1782/51145 clusters
Feb 23 06:36:03 localhost systemd[1]: Finished File System Check on /dev/disk/by-uuid/7B77-95E7.

● systemd-fsck@dev-vda2.service - File System Check on /dev/vda2
     Loaded: loaded (/usr/lib/systemd/system/systemd-fsck@.service; static)
     Active: active (exited) since Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
       Docs: man:systemd-fsck@.service(8)
   Main PID: 720 (code=exited, status=0/SUCCESS)
        CPU: 15ms

Feb 23 06:36:04 localhost systemd[1]: Starting File System Check on /dev/vda2...
Feb 23 06:36:04 localhost systemd[1]: Finished File System Check on /dev/vda2.

● systemd-hostnamed.service - Hostname Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-hostnamed.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-hostnamed.service.d
             └─disable-privatedevices.conf
     Active: active (running) since Mon 2026-02-23 10:10:40 UTC; 22s ago
       Docs: man:systemd-hostnamed.service(8)
             man:hostname(5)
             man:machine-info(5)
             man:org.freedesktop.resolve1(5)
   Main PID: 334843 (systemd-hostnam)
         IO: 4.0K read, 0B written
      Tasks: 1 (limit: 100220)
     Memory: 2.5M
        CPU: 116ms
     CGroup: /system.slice/systemd-hostnamed.service
             └─334843 /usr/lib/systemd/systemd-hostnamed

Feb 23 10:10:40 np0005626466.localdomain systemd[1]: Starting Hostname Service...
Feb 23 10:10:40 np0005626466.localdomain systemd[1]: Started Hostname Service.

● systemd-hwdb-update.service - Rebuild Hardware Database
     Loaded: loaded (/usr/lib/systemd/system/systemd-hwdb-update.service; static)
     Active: active (exited) since Mon 2026-02-23 06:36:03 UTC; 3h 35min ago
       Docs: man:hwdb(7)
             man:systemd-hwdb(8)
   Main PID: 625 (code=exited, status=0/SUCCESS)
        CPU: 451ms

Feb 23 06:36:02 localhost systemd[1]: Starting Rebuild Hardware Database...
Feb 23 06:36:03 localhost systemd[1]: Finished Rebuild Hardware Database.

○ systemd-initctl.service - initctl Compatibility Daemon
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-initctl.socket
       Docs: man:systemd-initctl.service(8)

● systemd-journal-catalog-update.service - Rebuild Journal Catalog
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-catalog-update.service; static)
     Active: active (exited) since Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 725 (code=exited, status=0/SUCCESS)
        CPU: 24ms

Feb 23 06:36:04 localhost systemd[1]: Starting Rebuild Journal Catalog...
Feb 23 06:36:04 localhost systemd[1]: Finished Rebuild Journal Catalog.

● systemd-journal-flush.service - Flush Journal to Persistent Storage
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-flush.service; static)
     Active: active (exited) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 626 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Feb 23 06:36:02 localhost systemd[1]: Starting Flush Journal to Persistent Storage...
Feb 23 06:36:02 localhost systemd[1]: Finished Flush Journal to Persistent Storage.

● systemd-journald.service - Journal Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.service; static)
     Active: active (running) since Mon 2026-02-23 07:50:07 UTC; 2h 20min ago
TriggeredBy: ● systemd-journald-dev-log.socket
             ● systemd-journald.socket
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 47881 (systemd-journal)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 100220)
     Memory: 103.8M
        CPU: 15.682s
     CGroup: /system.slice/systemd-journald.service
             └─47881 /usr/lib/systemd/systemd-journald

Feb 23 09:28:10 np0005626466.localdomain systemd-journald[47881]: Field hash table of /run/log/journal/c0212a8b024a111cfc61293864f36c87/system.journal has a fill level at 76.3 (254 of 333 items), suggesting rotation.
Feb 23 09:28:10 np0005626466.localdomain systemd-journald[47881]: /run/log/journal/c0212a8b024a111cfc61293864f36c87/system.journal: Journal header limits reached or header out-of-date, rotating.
Feb 23 09:36:59 np0005626466.localdomain systemd-journald[47881]: Field hash table of /run/log/journal/c0212a8b024a111cfc61293864f36c87/system.journal has a fill level at 75.1 (250 of 333 items), suggesting rotation.
Feb 23 09:36:59 np0005626466.localdomain systemd-journald[47881]: /run/log/journal/c0212a8b024a111cfc61293864f36c87/system.journal: Journal header limits reached or header out-of-date, rotating.
Feb 23 09:38:59 np0005626466.localdomain systemd-journald[47881]: Field hash table of /run/log/journal/c0212a8b024a111cfc61293864f36c87/system.journal has a fill level at 105.4 (351 of 333 items), suggesting rotation.
Feb 23 09:38:59 np0005626466.localdomain systemd-journald[47881]: /run/log/journal/c0212a8b024a111cfc61293864f36c87/system.journal: Journal header limits reached or header out-of-date, rotating.
Feb 23 09:42:32 np0005626466.localdomain systemd-journald[47881]: Field hash table of /run/log/journal/c0212a8b024a111cfc61293864f36c87/system.journal has a fill level at 80.5 (268 of 333 items), suggesting rotation.
Feb 23 09:42:32 np0005626466.localdomain systemd-journald[47881]: /run/log/journal/c0212a8b024a111cfc61293864f36c87/system.journal: Journal header limits reached or header out-of-date, rotating.
Feb 23 10:09:23 np0005626466.localdomain systemd-journald[47881]: Data hash table of /run/log/journal/c0212a8b024a111cfc61293864f36c87/system.journal has a fill level at 75.0 (53725 of 71630 items, 25165824 file size, 468 bytes per hash table item), suggesting rotation.
Feb 23 10:09:23 np0005626466.localdomain systemd-journald[47881]: /run/log/journal/c0212a8b024a111cfc61293864f36c87/system.journal: Journal header limits reached or header out-of-date, rotating.

● systemd-logind.service - User Login Management
     Loaded: loaded (/usr/lib/systemd/system/systemd-logind.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-logind.service.d
             └─10-grub2-logind-service.conf
     Active: active (running) since Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
       Docs: man:sd-login(3)
             man:systemd-logind.service(8)
             man:logind.conf(5)
             man:org.freedesktop.login1(5)
   Main PID: 760 (systemd-logind)
     Status: "Processing requests..."
         IO: 364.0K read, 0B written
      Tasks: 1 (limit: 100220)
     Memory: 6.4M
        CPU: 7.573s
     CGroup: /system.slice/systemd-logind.service
             └─760 /usr/lib/systemd/systemd-logind

Feb 23 10:09:28 np0005626466.localdomain systemd-logind[760]: New session 81 of user zuul.
Feb 23 10:09:28 np0005626466.localdomain systemd-logind[760]: Session 81 logged out. Waiting for processes to exit.
Feb 23 10:09:28 np0005626466.localdomain systemd-logind[760]: Removed session 81.
Feb 23 10:09:29 np0005626466.localdomain systemd-logind[760]: New session 82 of user zuul.
Feb 23 10:09:29 np0005626466.localdomain systemd-logind[760]: Session 82 logged out. Waiting for processes to exit.
Feb 23 10:09:29 np0005626466.localdomain systemd-logind[760]: Removed session 82.
Feb 23 10:09:29 np0005626466.localdomain systemd-logind[760]: New session 83 of user zuul.
Feb 23 10:09:29 np0005626466.localdomain systemd-logind[760]: Session 83 logged out. Waiting for processes to exit.
Feb 23 10:09:29 np0005626466.localdomain systemd-logind[760]: Removed session 83.
Feb 23 10:10:20 np0005626466.localdomain systemd-logind[760]: New session 84 of user zuul.

○ systemd-machine-id-commit.service - Commit a transient machine-id on disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-machine-id-commit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
       Docs: man:systemd-machine-id-commit.service(8)

Feb 23 06:36:04 localhost systemd[1]: Commit a transient machine-id on disk was skipped because of an unmet condition check (ConditionPathIsMountPoint=/etc/machine-id).

● systemd-machined.service - Virtual Machine and Container Registration Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-machined.service; static)
     Active: active (running) since Mon 2026-02-23 09:24:08 UTC; 46min ago
       Docs: man:systemd-machined.service(8)
             man:org.freedesktop.machine1(5)
   Main PID: 205757 (systemd-machine)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 100220)
     Memory: 1.3M
        CPU: 1.895s
     CGroup: /system.slice/systemd-machined.service
             └─205757 /usr/lib/systemd/systemd-machined

Feb 23 09:53:33 np0005626466.localdomain systemd-machined[205757]: New machine qemu-1-instance-00000007.
Feb 23 09:53:49 np0005626466.localdomain systemd-machined[205757]: Machine qemu-1-instance-00000007 terminated.
Feb 23 09:54:06 np0005626466.localdomain systemd-machined[205757]: New machine qemu-2-instance-00000007.
Feb 23 09:54:20 np0005626466.localdomain systemd-machined[205757]: Machine qemu-2-instance-00000007 terminated.
Feb 23 09:54:23 np0005626466.localdomain systemd-machined[205757]: New machine qemu-3-instance-00000008.
Feb 23 09:54:35 np0005626466.localdomain systemd-machined[205757]: Machine qemu-3-instance-00000008 terminated.
Feb 23 09:55:46 np0005626466.localdomain systemd-machined[205757]: New machine qemu-4-instance-0000000b.
Feb 23 09:55:56 np0005626466.localdomain systemd-machined[205757]: Machine qemu-4-instance-0000000b terminated.
Feb 23 09:59:30 np0005626466.localdomain systemd-machined[205757]: New machine qemu-5-instance-0000000c.
Feb 23 10:00:17 np0005626466.localdomain systemd-machined[205757]: Machine qemu-5-instance-0000000c terminated.

● systemd-modules-load.service - Load Kernel Modules
     Loaded: loaded (/usr/lib/systemd/system/systemd-modules-load.service; static)
     Active: active (exited) since Mon 2026-02-23 09:25:54 UTC; 45min ago
       Docs: man:systemd-modules-load.service(8)
             man:modules-load.d(5)
   Main PID: 216518 (code=exited, status=0/SUCCESS)
        CPU: 11ms

Feb 23 09:25:54 np0005626466.localdomain systemd[1]: Starting Load Kernel Modules...
Feb 23 09:25:54 np0005626466.localdomain systemd-modules-load[216518]: Module 'msr' is built in
Feb 23 09:25:54 np0005626466.localdomain systemd[1]: Finished Load Kernel Modules.

● systemd-network-generator.service - Generate network units from Kernel command line
     Loaded: loaded (/usr/lib/systemd/system/systemd-network-generator.service; enabled; preset: enabled)
     Active: active (exited) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
       Docs: man:systemd-network-generator.service(8)
   Main PID: 619 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Unit systemd-networkd-wait-online.service could not be found.
Feb 23 06:36:02 localhost systemd[1]: Finished Generate network units from Kernel command line.

○ systemd-pcrphase-initrd.service - TPM2 PCR Barrier (initrd)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-initrd.service; static)
     Active: inactive (dead)
       Docs: man:systemd-pcrphase-initrd.service(8)

○ systemd-pcrphase-sysinit.service - TPM2 PCR Barrier (Initialization)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-sysinit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
       Docs: man:systemd-pcrphase-sysinit.service(8)

Feb 23 06:36:04 localhost systemd[1]: TPM2 PCR Barrier (Initialization) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pcrphase.service - TPM2 PCR Barrier (User)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-23 06:36:05 UTC; 3h 34min ago
       Docs: man:systemd-pcrphase.service(8)

Feb 23 06:36:05 np0005626466.novalocal systemd[1]: TPM2 PCR Barrier (User) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-quotacheck.service - File System Quota Check
     Loaded: loaded (/usr/lib/systemd/system/systemd-quotacheck.service; static)
     Active: inactive (dead)
       Docs: man:systemd-quotacheck.service(8)

● systemd-random-seed.service - Load/Save Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-random-seed.service; static)
     Active: active (exited) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
       Docs: man:systemd-random-seed.service(8)
             man:random(4)
   Main PID: 627 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Feb 23 06:36:02 localhost systemd[1]: Starting Load/Save Random Seed...
Feb 23 06:36:02 localhost systemd[1]: Finished Load/Save Random Seed.

● systemd-remount-fs.service - Remount Root and Kernel File Systems
     Loaded: loaded (/usr/lib/systemd/system/systemd-remount-fs.service; enabled-runtime; preset: disabled)
     Active: active (exited) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
       Docs: man:systemd-remount-fs.service(8)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
   Main PID: 620 (code=exited, status=0/SUCCESS)
        CPU: 21ms

Feb 23 06:36:02 localhost systemd[1]: Finished Remount Root and Kernel File Systems.

○ systemd-repart.service - Repartition Root Disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-repart.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
       Docs: man:systemd-repart.service(8)

○ systemd-rfkill.service - Load/Save RF Kill Switch Status
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-rfkill.socket
       Docs: man:systemd-rfkill.service(8)

● systemd-sysctl.service - Apply Kernel Variables
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysctl.service; static)
     Active: active (exited) since Mon 2026-02-23 09:10:59 UTC; 1h 0min ago
       Docs: man:systemd-sysctl.service(8)
             man:sysctl.d(5)
   Main PID: 126510 (code=exited, status=0/SUCCESS)
        CPU: 21ms

Feb 23 09:10:59 np0005626466.localdomain systemd[1]: Starting Apply Kernel Variables...
Feb 23 09:10:59 np0005626466.localdomain systemd[1]: Finished Apply Kernel Variables.

○ systemd-sysext.service - Merge System Extension Images into /usr/ and /opt/
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysext.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd-sysext.service(8)

● systemd-sysusers.service - Create System Users
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysusers.service; static)
Unit systemd-timesyncd.service could not be found.
Unit systemd-tmpfiles.service could not be found.
     Active: active (exited) since Mon 2026-02-23 06:36:03 UTC; 3h 35min ago
       Docs: man:sysusers.d(5)
             man:systemd-sysusers.service(8)
   Main PID: 630 (code=exited, status=0/SUCCESS)
        CPU: 24ms

Feb 23 06:36:02 localhost systemd[1]: Starting Create System Users...
Feb 23 06:36:03 localhost systemd-sysusers[630]: Creating group 'sgx' with GID 989.
Feb 23 06:36:03 localhost systemd-sysusers[630]: Creating group 'systemd-oom' with GID 988.
Feb 23 06:36:03 localhost systemd-sysusers[630]: Creating user 'systemd-oom' (systemd Userspace OOM Killer) with UID 988 and GID 988.
Feb 23 06:36:03 localhost systemd[1]: Finished Create System Users.

○ systemd-tmpfiles-clean.service - Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.service; static)
     Active: inactive (dead) since Mon 2026-02-23 06:51:10 UTC; 3h 19min ago
TriggeredBy: ● systemd-tmpfiles-clean.timer
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 6221 (code=exited, status=0/SUCCESS)
        CPU: 32ms

Feb 23 06:51:10 np0005626466.novalocal systemd[1]: Starting Cleanup of Temporary Directories...
Feb 23 06:51:10 np0005626466.novalocal systemd[1]: systemd-tmpfiles-clean.service: Deactivated successfully.
Feb 23 06:51:10 np0005626466.novalocal systemd[1]: Finished Cleanup of Temporary Directories.

● systemd-tmpfiles-setup-dev.service - Create Static Device Nodes in /dev
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup-dev.service; static)
     Active: active (exited) since Mon 2026-02-23 06:36:03 UTC; 3h 35min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 632 (code=exited, status=0/SUCCESS)
        CPU: 32ms

Feb 23 06:36:03 localhost systemd[1]: Starting Create Static Device Nodes in /dev...
Feb 23 06:36:03 localhost systemd[1]: Finished Create Static Device Nodes in /dev.

● systemd-tmpfiles-setup.service - Create Volatile Files and Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup.service; static)
     Active: active (exited) since Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 718 (code=exited, status=0/SUCCESS)
        CPU: 77ms

Feb 23 06:36:04 localhost systemd[1]: Starting Create Volatile Files and Directories...
Feb 23 06:36:04 localhost systemd[1]: Finished Create Volatile Files and Directories.

● systemd-udev-settle.service - Wait for udev To Complete Device Initialization
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-settle.service; static)
     Active: active (exited) since Mon 2026-02-23 09:25:48 UTC; 45min ago
       Docs: man:systemd-udev-settle.service(8)
   Main PID: 215854 (code=exited, status=0/SUCCESS)
        CPU: 18ms

Feb 23 09:25:48 np0005626466.localdomain systemd[1]: Starting Wait for udev To Complete Device Initialization...
Feb 23 09:25:48 np0005626466.localdomain udevadm[215854]: systemd-udev-settle.service is deprecated. Please fix multipathd.service not to pull it in.
Feb 23 09:25:48 np0005626466.localdomain systemd[1]: Finished Wait for udev To Complete Device Initialization.

● systemd-udev-trigger.service - Coldplug All udev Devices
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-trigger.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-udev-trigger.service.d
             └─systemd-udev-trigger-no-reload.conf
     Active: active (exited) since Mon 2026-02-23 06:36:03 UTC; 3h 35min ago
       Docs: man:udev(7)
             man:systemd-udevd.service(8)
   Main PID: 622 (code=exited, status=0/SUCCESS)
        CPU: 96ms

Feb 23 06:36:03 localhost systemd[1]: Finished Coldplug All udev Devices.

● systemd-udevd.service - Rule-based Manager for Device Events and Files
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd.service; static)
     Active: active (running) since Mon 2026-02-23 07:50:07 UTC; 2h 20min ago
TriggeredBy: ● systemd-udevd-control.socket
             ● systemd-udevd-kernel.socket
       Docs: man:systemd-udevd.service(8)
             man:udev(7)
   Main PID: 47889 (systemd-udevd)
     Status: "Processing with 32 children at max"
         IO: 13.6M read, 0B written
      Tasks: 2
     Memory: 16.2M
        CPU: 2.714s
     CGroup: /system.slice/systemd-udevd.service
             └─udev
               ├─ 47889 /usr/lib/systemd/systemd-udevd
               └─339919 /usr/lib/systemd/systemd-udevd

Feb 23 09:59:39 np0005626466.localdomain systemd-udevd[323535]: Network interface NamePolicy= disabled on kernel command line.
Feb 23 09:59:52 np0005626466.localdomain systemd-udevd[324685]: Network interface NamePolicy= disabled on kernel command line.
Feb 23 10:00:19 np0005626466.localdomain systemd-udevd[325443]: Network interface NamePolicy= disabled on kernel command line.
Feb 23 10:00:29 np0005626466.localdomain systemd-udevd[325972]: Network interface NamePolicy= disabled on kernel command line.
Feb 23 10:00:53 np0005626466.localdomain systemd-udevd[326199]: Network interface NamePolicy= disabled on kernel command line.
Feb 23 10:02:41 np0005626466.localdomain systemd-udevd[327406]: Network interface NamePolicy= disabled on kernel command line.
Feb 23 10:10:28 np0005626466.localdomain lvm[331998]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Feb 23 10:10:28 np0005626466.localdomain lvm[331998]: VG ceph_vg0 finished
Feb 23 10:10:28 np0005626466.localdomain lvm[332009]: PV /dev/loop4 online, VG ceph_vg1 is complete.
Feb 23 10:10:28 np0005626466.localdomain lvm[332009]: VG ceph_vg1 finished

● systemd-update-done.service - Update is Completed
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-done.service; static)
     Active: active (exited) since Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
       Docs: man:systemd-update-done.service(8)
   Main PID: 749 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Feb 23 06:36:04 localhost systemd[1]: Starting Update is Completed...
Feb 23 06:36:04 localhost systemd[1]: Finished Update is Completed.

○ systemd-update-utmp-runlevel.service - Record Runlevel Change in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp-runlevel.service; static)
     Active: inactive (dead) since Mon 2026-02-23 06:36:09 UTC; 3h 34min ago
       Docs: man:systemd-update-utmp-runlevel.service(8)
             man:utmp(5)
   Main PID: 1147 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Feb 23 06:36:09 np0005626466.novalocal systemd[1]: Starting Record Runlevel Change in UTMP...
Feb 23 06:36:09 np0005626466.novalocal systemd[1]: systemd-update-utmp-runlevel.service: Deactivated successfully.
Feb 23 06:36:09 np0005626466.novalocal systemd[1]: Finished Record Runlevel Change in UTMP.

● systemd-update-utmp.service - Record System Boot/Shutdown in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp.service; static)
     Active: active (exited) since Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
       Docs: man:systemd-update-utmp.service(8)
             man:utmp(5)
   Main PID: 748 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Feb 23 06:36:04 localhost systemd[1]: Starting Record System Boot/Shutdown in UTMP...
Feb 23 06:36:04 localhost systemd[1]: Finished Record System Boot/Shutdown in UTMP.

● systemd-user-sessions.service - Permit User Sessions
     Loaded: loaded (/usr/lib/systemd/system/systemd-user-sessions.service; static)
     Active: active (exited) since Mon 2026-02-23 06:36:09 UTC; 3h 34min ago
       Docs: man:systemd-user-sessions.service(8)
   Main PID: 1134 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Feb 23 06:36:09 np0005626466.novalocal systemd[1]: Starting Permit User Sessions...
Feb 23 06:36:09 np0005626466.novalocal systemd[1]: Finished Permit User Sessions.

○ systemd-vconsole-setup.service - Setup Virtual Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-vconsole-setup.service; static)
     Active: inactive (dead) since Mon 2026-02-23 06:36:01 UTC; 3h 35min ago
   Duration: 1.591s
       Docs: man:systemd-vconsole-setup.service(8)
             man:vconsole.conf(5)
   Main PID: 288 (code=exited, status=0/SUCCESS)
Unit tlp.service could not be found.
Unit tripleo_neutron_dhcp.service could not be found.
Unit tripleo_neutron_l3_agent.service could not be found.
Unit tripleo_neutron_ovs_agent.service could not be found.
Unit tripleo_nova_compute.service could not be found.
Unit tripleo_nova_virtqemud.service could not be found.
Unit tripleo_nova_virtqemud_recover.service could not be found.
        CPU: 76ms

Feb 23 06:36:00 localhost systemd[1]: Finished Setup Virtual Console.
Feb 23 06:36:01 localhost systemd[1]: systemd-vconsole-setup.service: Deactivated successfully.
Feb 23 06:36:01 localhost systemd[1]: Stopped Setup Virtual Console.
Notice: journal has been rotated since unit was started, output may be incomplete.

● tripleo-container-shutdown.service - TripleO Container Shutdown
     Loaded: loaded (/usr/lib/systemd/system/tripleo-container-shutdown.service; enabled; preset: enabled)
     Active: active (exited) since Mon 2026-02-23 07:52:04 UTC; 2h 18min ago
       Docs: https://docs.openstack.org/tripleo-docs/
   Main PID: 54032 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Feb 23 07:52:04 np0005626466.localdomain systemd[1]: Starting TripleO Container Shutdown...
Feb 23 07:52:04 np0005626466.localdomain systemd[1]: Finished TripleO Container Shutdown.

● tuned.service - Dynamic System Tuning Daemon
     Loaded: loaded (/usr/lib/systemd/system/tuned.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-23 09:10:43 UTC; 1h 0min ago
       Docs: man:tuned(8)
             man:tuned.conf(5)
             man:tuned-adm(8)
   Main PID: 125602 (tuned)
         IO: 0B read, 0B written
      Tasks: 4 (limit: 100220)
     Memory: 15.7M
        CPU: 1.633s
     CGroup: /system.slice/tuned.service
             └─125602 /usr/bin/python3 -Es /usr/sbin/tuned -l -P

Feb 23 09:10:42 np0005626466.localdomain systemd[1]: Starting Dynamic System Tuning Daemon...
Feb 23 09:10:43 np0005626466.localdomain systemd[1]: Started Dynamic System Tuning Daemon.

○ unbound-anchor.service - update of the root trust anchor for DNSSEC validation in unbound
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.service; static)
     Active: inactive (dead)
TriggeredBy: ● unbound-anchor.timer
       Docs: man:unbound-anchor(8)

● user-runtime-dir@1000.service - User Runtime Directory /run/user/1000
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Mon 2026-02-23 06:36:32 UTC; 3h 34min ago
       Docs: man:user@.service(5)
   Main PID: 4177 (code=exited, status=0/SUCCESS)
        CPU: 11ms

Feb 23 06:36:32 np0005626466.novalocal systemd[1]: Starting User Runtime Directory /run/user/1000...
Feb 23 06:36:32 np0005626466.novalocal systemd[1]: Finished User Runtime Directory /run/user/1000.

● user-runtime-dir@1002.service - User Runtime Directory /run/user/1002
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Mon 2026-02-23 07:37:14 UTC; 2h 33min ago
       Docs: man:user@.service(5)
   Main PID: 26357 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Feb 23 07:37:14 np0005626466.localdomain systemd[1]: Starting User Runtime Directory /run/user/1002...
Feb 23 07:37:14 np0005626466.localdomain systemd[1]: Finished User Runtime Directory /run/user/1002.

● user@1000.service - User Manager for UID 1000
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Mon 2026-02-23 06:36:32 UTC; 3h 34min ago
       Docs: man:user@.service(5)
   Main PID: 4178 (systemd)
     Status: "Ready."
         IO: 12.0K read, 8.0K written
      Tasks: 5
     Memory: 6.8M
        CPU: 17.358s
     CGroup: /user.slice/user-1000.slice/user@1000.service
             ├─app.slice
             │ └─dbus-broker.service
             │   ├─14084 /usr/bin/dbus-broker-launch --scope user
             │   └─14097 dbus-broker --log 4 --controller 9 --machine-id c0212a8b024a111cfc61293864f36c87 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             ├─init.scope
             │ ├─4178 /usr/lib/systemd/systemd --user
             │ └─4180 "(sd-pam)"
             └─user.slice
               └─podman-pause-de66dbe9.scope
                 └─13932 podman

Feb 23 06:54:32 np0005626466.novalocal dbus-broker-lau[14084]: Ready
Feb 23 06:54:32 np0005626466.novalocal systemd[4178]: selinux: avc:  op=load_policy lsm=selinux seqno=3 res=1
Feb 23 06:54:32 np0005626466.novalocal systemd[4178]: Created slice Slice /user.
Feb 23 06:54:32 np0005626466.novalocal systemd[4178]: podman-13916.scope: unit configures an IP firewall, but not running as root.
Feb 23 06:54:32 np0005626466.novalocal systemd[4178]: (This warning is only shown for the first unit using IP firewalling.)
Feb 23 06:54:32 np0005626466.novalocal systemd[4178]: Started podman-13916.scope.
Feb 23 06:54:32 np0005626466.novalocal systemd[4178]: Started podman-pause-de66dbe9.scope.
Feb 23 07:49:57 np0005626466.localdomain dbus-broker-launch[14084]: Noticed file-system modification, trigger reload.
Feb 23 07:49:57 np0005626466.localdomain dbus-broker-launch[14084]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +31: Eavesdropping is deprecated and ignored
Feb 23 07:49:57 np0005626466.localdomain dbus-broker-launch[14084]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +33: Eavesdropping is deprecated and ignored

● user@1002.service - User Manager for UID 1002
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Mon 2026-02-23 07:37:14 UTC; 2h 33min ago
       Docs: man:user@.service(5)
   Main PID: 26358 (systemd)
     Status: "Ready."
         IO: 0B read, 7.0K written
      Tasks: 2
     Memory: 5.5M
        CPU: 15.545s
     CGroup: /user.slice/user-1002.slice/user@1002.service
             └─init.scope
               ├─26358 /usr/lib/systemd/systemd --user
               └─26360 "(sd-pam)"

Feb 23 07:37:14 np0005626466.localdomain systemd[26358]: Finished Create User's Volatile Files and Directories.
Feb 23 07:37:14 np0005626466.localdomain systemd[26358]: Reached target Basic System.
Feb 23 07:37:14 np0005626466.localdomain systemd[1]: Started User Manager for UID 1002.
Feb 23 07:37:14 np0005626466.localdomain systemd[26358]: Reached target Main User Target.
Feb 23 07:37:14 np0005626466.localdomain systemd[26358]: Startup finished in 118ms.
Feb 23 07:39:21 np0005626466.localdomain systemd[26358]: Starting Mark boot as successful...
Feb 23 07:39:21 np0005626466.localdomain systemd[26358]: Finished Mark boot as successful.
Feb 23 07:42:55 np0005626466.localdomain systemd[26358]: Created slice User Background Tasks Slice.
Feb 23 07:42:55 np0005626466.localdomain systemd[26358]: Starting Cleanup of User's Temporary Files and Directories...
Feb 23 07:42:55 np0005626466.localdomain systemd[26358]: Finished Cleanup of User's Temporary Files and Directories.

○ virtinterfaced.service - libvirt interface daemon
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtinterfaced.socket
             ○ virtinterfaced-admin.socket
             ○ virtinterfaced-ro.socket
       Docs: man:virtinterfaced(8)
             https://libvirt.org/

Feb 23 09:37:46 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtinterfaced.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:42:44 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtinterfaced.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:42:44 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtinterfaced.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:44:16 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtinterfaced.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:44:16 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtinterfaced.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:44:30 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtinterfaced.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:44:30 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtinterfaced.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:49:12 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtinterfaced.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:49:25 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtinterfaced.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:49:25 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtinterfaced.service:18: Failed to parse service type, ignoring: notify-reload

○ virtlockd.service - libvirt locking daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ● virtlockd.socket
             ○ virtlockd-admin.socket
       Docs: man:virtlockd(8)
             https://libvirt.org/

● virtlogd.service - libvirt logging daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 09:24:04 UTC; 46min ago
TriggeredBy: ● virtlogd-admin.socket
             ● virtlogd.socket
       Docs: man:virtlogd(8)
             https://libvirt.org/
   Main PID: 205249 (virtlogd)
         IO: 0B read, 428.0K written
      Tasks: 1 (limit: 100220)
     Memory: 2.3M
        CPU: 5.458s
     CGroup: /system.slice/virtlogd.service
             └─205249 /usr/sbin/virtlogd

Feb 23 09:24:04 np0005626466.localdomain systemd[1]: Starting libvirt logging daemon...
Feb 23 09:24:04 np0005626466.localdomain systemd[1]: Started libvirt logging daemon.

○ virtnetworkd.service - libvirt network daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnetworkd.socket
             ○ virtnetworkd-ro.socket
             ○ virtnetworkd-admin.socket
       Docs: man:virtnetworkd(8)
             https://libvirt.org/

Feb 23 09:37:46 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtnetworkd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:42:44 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtnetworkd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:42:44 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtnetworkd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:44:16 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtnetworkd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:44:16 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtnetworkd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:44:30 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtnetworkd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:44:30 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtnetworkd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:49:12 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtnetworkd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:49:25 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtnetworkd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:49:25 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtnetworkd.service:18: Failed to parse service type, ignoring: notify-reload

● virtnodedevd.service - libvirt nodedev daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 09:27:45 UTC; 43min ago
TriggeredBy: ● virtnodedevd-ro.socket
             ● virtnodedevd-admin.socket
             ● virtnodedevd.socket
       Docs: man:virtnodedevd(8)
             https://libvirt.org/
   Main PID: 229338 (virtnodedevd)
         IO: 0B read, 0B written
      Tasks: 20 (limit: 100220)
     Memory: 11.4M
        CPU: 3.527s
     CGroup: /system.slice/virtnodedevd.service
             └─229338 /usr/sbin/virtnodedevd --timeout 120

Feb 23 10:00:53 np0005626466.localdomain virtnodedevd[229338]: ethtool ioctl error on tap51c10725-ff: No such device
Feb 23 10:00:53 np0005626466.localdomain virtnodedevd[229338]: ethtool ioctl error on tap51c10725-ff: No such device
Feb 23 10:02:41 np0005626466.localdomain virtnodedevd[229338]: ethtool ioctl error on tap959298c0-28: No such device
Feb 23 10:02:41 np0005626466.localdomain virtnodedevd[229338]: ethtool ioctl error on tap959298c0-28: No such device
Feb 23 10:02:41 np0005626466.localdomain virtnodedevd[229338]: ethtool ioctl error on tap959298c0-28: No such device
Feb 23 10:02:41 np0005626466.localdomain virtnodedevd[229338]: ethtool ioctl error on tap959298c0-28: No such device
Feb 23 10:02:41 np0005626466.localdomain virtnodedevd[229338]: ethtool ioctl error on tap959298c0-28: No such device
Feb 23 10:02:41 np0005626466.localdomain virtnodedevd[229338]: ethtool ioctl error on tap959298c0-28: No such device
Feb 23 10:02:41 np0005626466.localdomain virtnodedevd[229338]: ethtool ioctl error on tap959298c0-28: No such device
Feb 23 10:02:41 np0005626466.localdomain virtnodedevd[229338]: ethtool ioctl error on tap959298c0-28: No such device

○ virtnwfilterd.service - libvirt nwfilter daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnwfilterd.socket
             ○ virtnwfilterd-admin.socket
             ○ virtnwfilterd-ro.socket
       Docs: man:virtnwfilterd(8)
             https://libvirt.org/

Feb 23 09:37:46 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtnwfilterd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:42:44 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtnwfilterd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:42:44 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtnwfilterd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:44:16 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtnwfilterd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:44:16 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtnwfilterd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:44:30 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtnwfilterd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:44:30 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtnwfilterd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:49:12 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtnwfilterd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:49:25 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtnwfilterd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:49:25 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtnwfilterd.service:18: Failed to parse service type, ignoring: notify-reload

○ virtproxyd.service - libvirt proxy daemon
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.service; enabled; preset: disabled)
     Active: inactive (dead) since Mon 2026-02-23 09:26:07 UTC; 44min ago
   Duration: 2min 273ms
TriggeredBy: ● virtproxyd-admin.socket
             ● virtproxyd-ro.socket
             ● virtproxyd.socket
       Docs: man:virtproxyd(8)
             https://libvirt.org/
   Main PID: 205587 (code=exited, status=0/SUCCESS)
        CPU: 49ms

Feb 23 09:37:46 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtproxyd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:42:44 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtproxyd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:42:44 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtproxyd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:44:16 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtproxyd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:44:16 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtproxyd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:44:30 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtproxyd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:44:30 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtproxyd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:49:12 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtproxyd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:49:25 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtproxyd.service:18: Failed to parse service type, ignoring: notify-reload
Feb 23 09:49:25 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtproxyd.service:18: Failed to parse service type, ignoring: notify-reload

● virtqemud.service - libvirt QEMU daemon
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.service; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-23 09:27:42 UTC; 43min ago
TriggeredBy: ● virtqemud.socket
             ● virtqemud-admin.socket
             ● virtqemud-ro.socket
       Docs: man:virtqemud(8)
             https://libvirt.org/
   Main PID: 229010 (virtqemud)
         IO: 24.0K read, 136.0K written
      Tasks: 19 (limit: 32768)
     Memory: 26.8M
        CPU: 4.913s
     CGroup: /system.slice/virtqemud.service
             └─229010 /usr/sbin/virtqemud --timeout 120

Feb 23 09:49:25 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtqemud.service:25: Failed to parse service type, ignoring: notify-reload
Feb 23 09:54:06 np0005626466.localdomain virtqemud[229010]: End of file while reading data: Input/output error
Feb 23 09:54:06 np0005626466.localdomain virtqemud[229010]: End of file while reading data: Input/output error
Feb 23 09:54:30 np0005626466.localdomain virtqemud[229010]: Domain id=3 name='instance-00000008' uuid=78070789-b766-4674-b4e1-8040cbf7346b is tainted: custom-monitor
Feb 23 09:55:56 np0005626466.localdomain virtqemud[229010]: Unable to get XATTR trusted.libvirt.security.ref_selinux on vms/66c5eac8-f6f4-40ae-b09f-54e200c103b8_disk: No such file or directory
Feb 23 09:55:56 np0005626466.localdomain virtqemud[229010]: Unable to get XATTR trusted.libvirt.security.ref_dac on vms/66c5eac8-f6f4-40ae-b09f-54e200c103b8_disk: No such file or directory
Feb 23 10:10:27 np0005626466.localdomain virtqemud[229010]: Failed to connect socket to '/var/run/libvirt/virtnetworkd-sock-ro': No such file or directory
Feb 23 10:10:27 np0005626466.localdomain virtqemud[229010]: Failed to connect socket to '/var/run/libvirt/virtnwfilterd-sock-ro': No such file or directory
Feb 23 10:10:27 np0005626466.localdomain virtqemud[229010]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory
Feb 23 10:11:03 np0005626466.localdomain virtqemud[229010]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory

● virtsecretd.service - libvirt secret daemon
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.service; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 09:59:30 UTC; 11min ago
TriggeredBy: ● virtsecretd-ro.socket
             ● virtsecretd-admin.socket
             ● virtsecretd.socket
       Docs: man:virtsecretd(8)
             https://libvirt.org/
   Main PID: 323059 (virtsecretd)
         IO: 0B read, 0B written
      Tasks: 18 (limit: 100220)
     Memory: 7.2M
        CPU: 93ms
     CGroup: /system.slice/virtsecretd.service
             └─323059 /usr/sbin/virtsecretd --timeout 120

Feb 23 09:59:30 np0005626466.localdomain systemd[1]: Started libvirt secret daemon.
Unit ypbind.service could not be found.
Unit yppasswdd.service could not be found.
Unit ypserv.service could not be found.
Unit ypxfrd.service could not be found.

○ virtstoraged.service - libvirt storage daemon
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtstoraged-admin.socket
             ○ virtstoraged-ro.socket
             ○ virtstoraged.socket
       Docs: man:virtstoraged(8)
             https://libvirt.org/

Feb 23 09:37:46 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtstoraged.service:20: Failed to parse service type, ignoring: notify-reload
Feb 23 09:42:44 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtstoraged.service:20: Failed to parse service type, ignoring: notify-reload
Feb 23 09:42:44 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtstoraged.service:20: Failed to parse service type, ignoring: notify-reload
Feb 23 09:44:16 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtstoraged.service:20: Failed to parse service type, ignoring: notify-reload
Feb 23 09:44:16 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtstoraged.service:20: Failed to parse service type, ignoring: notify-reload
Feb 23 09:44:30 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtstoraged.service:20: Failed to parse service type, ignoring: notify-reload
Feb 23 09:44:30 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtstoraged.service:20: Failed to parse service type, ignoring: notify-reload
Feb 23 09:49:12 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtstoraged.service:20: Failed to parse service type, ignoring: notify-reload
Feb 23 09:49:25 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtstoraged.service:20: Failed to parse service type, ignoring: notify-reload
Feb 23 09:49:25 np0005626466.localdomain systemd[1]: /usr/lib/systemd/system/virtstoraged.service:20: Failed to parse service type, ignoring: notify-reload

● -.slice - Root Slice
     Loaded: loaded
     Active: active since Mon 2026-02-23 06:36:00 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:00 UTC; 3h 35min ago
       Docs: man:systemd.special(7)
      Tasks: 633
     Memory: 4.0G
        CPU: 1h 8min 57.340s
     CGroup: /
             ├─init.scope
             │ └─1 /usr/lib/systemd/systemd --system --deserialize 25
             ├─machine.slice
             │ ├─libpod-122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4.scope
             │ │ └─container
             │ │   ├─156068 dumb-init --single-child -- kolla_start
             │ │   └─156071 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock
             │ ├─libpod-2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79.scope
             │ │ └─container
             │ │   ├─280694 dumb-init --single-child -- kolla_start
             │ │   ├─280697 /usr/bin/python3 /usr/bin/nova-compute
             │ │   ├─307841 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmpqn5dfyp_/privsep.sock
             │ │   └─324666 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmpa7k05g5b/privsep.sock
             │ ├─libpod-3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb.scope
             │ │ └─container
             │ │   ├─256427 dumb-init --single-child -- kolla_start
             │ │   └─256429 "neutron-sriov-nic-agent (/usr/bin/python3 /usr/bin/neutron-sriov-nic-agent)"
             │ ├─libpod-6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e.scope
             │ │ └─container
             │ │   └─243612 /app/openstack-network-exporter
             │ ├─libpod-8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c.scope
             │ │ └─container
             │ │   ├─264101 dumb-init --single-child -- /usr/sbin/dnsmasq -k --no-hosts --no-resolv --pid-file=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/pid --dhcp-hostsfile=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/host --addn-hosts=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/addn_hosts --dhcp-optsfile=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/opts --dhcp-leasefile=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/leases --dhcp-match=set:ipxe,175 --dhcp-userclass=set:ipxe6,iPXE --local-service --bind-dynamic --dhcp-range=set:subnet-21d77760-119f-4466-bc85-a0e9167487a9,192.168.122.0,static,255.255.255.0,86400s --dhcp-option-force=option:mtu,1350 --dhcp-lease-max=256 --conf-file=/dev/null --domain=openstacklocal
             │ │   └─264111 /usr/sbin/dnsmasq -k --no-hosts --no-resolv --pid-file=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/pid --dhcp-hostsfile=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/host --addn-hosts=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/addn_hosts --dhcp-optsfile=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/opts --dhcp-leasefile=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/leases --dhcp-match=set:ipxe,175 --dhcp-userclass=set:ipxe6,iPXE --local-service --bind-dynamic --dhcp-range=set:subnet-21d77760-119f-4466-bc85-a0e9167487a9,192.168.122.0,static,255.255.255.0,86400s --dhcp-option-force=option:mtu,1350 --dhcp-lease-max=256 --conf-file=/dev/null --domain=openstacklocal
             │ ├─libpod-8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e.scope
             │ │ └─container
             │ │   ├─161941 dumb-init --single-child -- kolla_start
             │ │   ├─161944 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │ │   ├─162130 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │ │   ├─162175 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp84iwktpn/privsep.sock
             │ │   ├─264002 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpbtevuisl/privsep.sock
             │ │   └─308309 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpkfen9wub/privsep.sock
             │ ├─libpod-a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41.scope
             │ │ └─container
             │ │   ├─264098 dumb-init --single-child -- /usr/sbin/dnsmasq -k --no-hosts --no-resolv --pid-file=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/pid --dhcp-hostsfile=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/host --addn-hosts=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/addn_hosts --dhcp-optsfile=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/opts --dhcp-leasefile=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/leases --dhcp-match=set:ipxe,175 --dhcp-userclass=set:ipxe6,iPXE --local-service --bind-dynamic --dhcp-range=set:subnet-33e5fe9f-f246-45b2-86ae-3c10c0318cd7,192.168.0.0,static,255.255.255.0,86400s --dhcp-option-force=option:mtu,1292 --dhcp-lease-max=256 --conf-file=/dev/null --domain=openstacklocal
             │ │   └─264104 /usr/sbin/dnsmasq -k --no-hosts --no-resolv --pid-file=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/pid --dhcp-hostsfile=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/host --addn-hosts=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/addn_hosts --dhcp-optsfile=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/opts --dhcp-leasefile=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/leases --dhcp-match=set:ipxe,175 --dhcp-userclass=set:ipxe6,iPXE --local-service --bind-dynamic --dhcp-range=set:subnet-33e5fe9f-f246-45b2-86ae-3c10c0318cd7,192.168.0.0,static,255.255.255.0,86400s --dhcp-option-force=option:mtu,1292 --dhcp-lease-max=256 --conf-file=/dev/null --domain=openstacklocal
             │ ├─libpod-bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d.scope
             │ │ └─container
             │ │   ├─263821 dumb-init --single-child -- kolla_start
             │ │   ├─263823 "neutron-dhcp-agent (/usr/bin/python3 /usr/bin/neutron-dhcp-agent)"
             │ │   ├─263857 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpg0yua8_o/privsep.sock
             │ │   ├─263887 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp7urwfrmj/privsep.sock
             │ │   ├─263903 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpopxbn0rr/privsep.sock
             │ │   └─315526 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.dhcp_release_cmd --privsep_sock_path /tmp/tmp02asih0a/privsep.sock
             │ ├─libpod-c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8.scope
             │ │ └─container
             │ │   └─241166 /bin/podman_exporter
             │ ├─libpod-cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52.scope
             │ │ └─container
             │ │   └─238755 /bin/node_exporter --web.disable-exporter-metrics --collector.systemd "--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service" --no-collector.dmi --no-collector.entropy --no-collector.thermal_zone --no-collector.time --no-collector.timex --no-collector.uname --no-collector.stat --no-collector.hwmon --no-collector.os --no-collector.selinux --no-collector.textfile --no-collector.powersupplyclass --no-collector.pressure --no-collector.rapl --path.rootfs=/rootfs
             │ ├─libpod-conmon-8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c.scope
             │ │ └─264099 /usr/bin/conmon --api-version 1 -c 8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c -u 8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c/userdata -p /run/containers/storage/overlay-containers/8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c/userdata/pidfile -n neutron-dnsmasq-qdhcp-91fdc6a7-b901-4255-83f7-4b37365658a3 --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c/userdata/oci-log --conmon-pidfile /run/containers/storage/overlay-containers/8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c/userdata/conmon.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c
             │ ├─libpod-conmon-a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41.scope
             │ │ └─264095 /usr/bin/conmon --api-version 1 -c a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41 -u a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41/userdata -p /run/containers/storage/overlay-containers/a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41/userdata/pidfile -n neutron-dnsmasq-qdhcp-9da5b53d-3184-450f-9a5b-bdba1a6c9f6d --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41/userdata/oci-log --conmon-pidfile /run/containers/storage/overlay-containers/a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41/userdata/conmon.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41
             │ └─libpod-fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9.scope
             │   └─container
             │     ├─236491 dumb-init --single-child -- kolla_start
             │     ├─236494 "ceilometer-polling: master process [/usr/bin/ceilometer-polling --polling-namespaces compute --logfile /dev/stdout]"
             │     └─236558 "ceilometer-polling: AgentManager worker(0)"
             ├─system.slice
             │ ├─NetworkManager.service
             │ │ └─5981 /usr/sbin/NetworkManager --no-daemon
             │ ├─auditd.service
             │ │ ├─726 /sbin/auditd
             │ │ └─728 /usr/sbin/sedispatch
             │ ├─chronyd.service
             │ │ └─139217 /usr/sbin/chronyd -F 2
             │ ├─crond.service
             │ │ └─1139 /usr/sbin/crond -n
             │ ├─dbus-broker.service
             │ │ ├─751 /usr/bin/dbus-broker-launch --scope system --audit
             │ │ └─755 dbus-broker --log 4 --controller 9 --machine-id c0212a8b024a111cfc61293864f36c87 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             │ ├─edpm_ceilometer_agent_compute.service
             │ │ └─236489 /usr/bin/conmon --api-version 1 -c fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9 -u fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9/userdata -p /run/containers/storage/overlay-containers/fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9/userdata/pidfile -n ceilometer_agent_compute --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9/userdata/oci-log --conmon-pidfile /run/ceilometer_agent_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9
             │ ├─edpm_neutron_dhcp_agent.service
             │ │ └─263819 /usr/bin/conmon --api-version 1 -c bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d -u bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d/userdata -p /run/containers/storage/overlay-containers/bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d/userdata/pidfile -n neutron_dhcp_agent --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d/userdata/oci-log --conmon-pidfile /run/neutron_dhcp_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d
             │ ├─edpm_neutron_sriov_agent.service
             │ │ └─256425 /usr/bin/conmon --api-version 1 -c 3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb -u 3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb/userdata -p /run/containers/storage/overlay-containers/3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb/userdata/pidfile -n neutron_sriov_agent --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb/userdata/oci-log --conmon-pidfile /run/neutron_sriov_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb
             │ ├─edpm_node_exporter.service
             │ │ └─238753 /usr/bin/conmon --api-version 1 -c cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52 -u cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52/userdata -p /run/containers/storage/overlay-containers/cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52/userdata/pidfile -n node_exporter --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52/userdata/oci-log --conmon-pidfile /run/node_exporter.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52
             │ ├─edpm_nova_compute.service
             │ │ └─280690 /usr/bin/conmon --api-version 1 -c 2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79 -u 2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79/userdata -p /run/containers/storage/overlay-containers/2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79
             │ ├─edpm_openstack_network_exporter.service
             │ │ └─243610 /usr/bin/conmon --api-version 1 -c 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e -u 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e/userdata -p /run/containers/storage/overlay-containers/6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e/userdata/pidfile -n openstack_network_exporter --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e/userdata/oci-log --conmon-pidfile /run/openstack_network_exporter.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e
             │ ├─edpm_ovn_controller.service
             │ │ └─156066 /usr/bin/conmon --api-version 1 -c 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4 -u 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4/userdata -p /run/containers/storage/overlay-containers/122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4
             │ ├─edpm_ovn_metadata_agent.service
             │ │ └─161939 /usr/bin/conmon --api-version 1 -c 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e -u 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e/userdata -p /run/containers/storage/overlay-containers/8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e
             │ ├─edpm_podman_exporter.service
             │ │ └─241164 /usr/bin/conmon --api-version 1 -c c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8 -u c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8/userdata -p /run/containers/storage/overlay-containers/c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8/userdata/pidfile -n podman_exporter --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8/userdata/oci-log --conmon-pidfile /run/podman_exporter.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8
             │ ├─gssproxy.service
             │ │ └─802 /usr/sbin/gssproxy -D
             │ ├─irqbalance.service
             │ │ └─758 /usr/sbin/irqbalance --foreground
             │ ├─iscsid.service
             │ │ └─212770 /usr/sbin/iscsid -f
             │ ├─multipathd.service
             │ │ └─218083 /sbin/multipathd -d -s
             │ ├─ovs-vswitchd.service
             │ │ └─22519 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             │ ├─ovsdb-server.service
             │ │ └─22437 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             │ ├─podman.service
             │ │ └─241175 /usr/bin/podman --log-level=info system service
             │ ├─polkit.service
             │ │ └─1036 /usr/lib/polkit-1/polkitd --no-debug
             │ ├─rhsm.service
             │ │ └─6643 /usr/bin/python3 /usr/libexec/rhsm-service
             │ ├─rhsmcertd.service
             │ │ └─795 /usr/bin/rhsmcertd
             │ ├─rpcbind.service
             │ │ └─724 /usr/bin/rpcbind -w -f
             │ ├─rsyslog.service
             │ │ └─759 /usr/sbin/rsyslogd -n
             │ ├─snmpd.service
             │ │ └─67626 /usr/sbin/snmpd -LS0-5d -f
             │ ├─sshd.service
             │ │ └─186135 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             │ ├─system-ceph\x2df1fea371\x2dcb69\x2d578d\x2da3d0\x2db5c472a84b46.slice
             │ │ ├─ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@crash.np0005626466.service
             │ │ │ ├─libpod-payload-975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e
             │ │ │ │ ├─29138 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.np0005626466
             │ │ │ │ └─29140 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.np0005626466
             │ │ │ └─runtime
             │ │ │   └─29136 /usr/bin/conmon --api-version 1 -c 975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e -u 975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e/userdata -p /run/containers/storage/overlay-containers/975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-crash-np0005626466 --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@crash.np0005626466.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e
             │ │ ├─ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mds.mds.np0005626466.vaywlp.service
             │ │ │ ├─libpod-payload-13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf
             │ │ │ │ ├─285095 /run/podman-init -- /usr/bin/ceph-mds -n mds.mds.np0005626466.vaywlp -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─285097 /usr/bin/ceph-mds -n mds.mds.np0005626466.vaywlp -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─285093 /usr/bin/conmon --api-version 1 -c 13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf -u 13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf/userdata -p /run/containers/storage/overlay-containers/13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-mds-mds-np0005626466-vaywlp --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mds.mds.np0005626466.vaywlp.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf
             │ │ ├─ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mgr.np0005626466.nisqfq.service
             │ │ │ ├─libpod-payload-9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf
             │ │ │ │ ├─286340 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.np0005626466.nisqfq -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─286342 /usr/bin/ceph-mgr -n mgr.np0005626466.nisqfq -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─286338 /usr/bin/conmon --api-version 1 -c 9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf -u 9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf/userdata -p /run/containers/storage/overlay-containers/9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-mgr-np0005626466-nisqfq --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mgr.np0005626466.nisqfq.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf
             │ │ ├─ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mon.np0005626466.service
             │ │ │ ├─libpod-payload-2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a
             │ │ │ │ ├─300839 /run/podman-init -- /usr/bin/ceph-mon -n mon.np0005626466 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ │ └─300841 /usr/bin/ceph-mon -n mon.np0005626466 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─300837 /usr/bin/conmon --api-version 1 -c 2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a -u 2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a/userdata -p /run/containers/storage/overlay-containers/2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-mon-np0005626466 --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mon.np0005626466.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a
             │ │ ├─ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@osd.1.service
             │ │ │ ├─libpod-payload-f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b
             │ │ │ │ ├─31871 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─31873 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─31869 /usr/bin/conmon --api-version 1 -c f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b -u f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b/userdata -p /run/containers/storage/overlay-containers/f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-osd-1 --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b
             │ │ └─ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@osd.4.service
             │ │   ├─libpod-payload-149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e
             │ │   │ ├─32811 /run/podman-init -- /usr/bin/ceph-osd -n osd.4 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │   │ └─32813 /usr/bin/ceph-osd -n osd.4 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │   └─runtime
             │ │     └─32809 /usr/bin/conmon --api-version 1 -c 149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e -u 149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e/userdata -p /run/containers/storage/overlay-containers/149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-osd-4 --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@osd.4.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e
             │ ├─system-getty.slice
             │ │ └─getty@tty1.service
             │ │   └─1140 /sbin/agetty -o "-p -- \\u" --noclear - linux
             │ ├─system-serial\x2dgetty.slice
             │ │ └─serial-getty@ttyS0.service
             │ │   └─1142 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             │ ├─systemd-hostnamed.service
             │ │ └─334843 /usr/lib/systemd/systemd-hostnamed
             │ ├─systemd-journald.service
             │ │ └─47881 /usr/lib/systemd/systemd-journald
             │ ├─systemd-logind.service
             │ │ └─760 /usr/lib/systemd/systemd-logind
             │ ├─systemd-machined.service
             │ │ └─205757 /usr/lib/systemd/systemd-machined
             │ ├─systemd-udevd.service
             │ │ └─udev
             │ │   ├─ 47889 /usr/lib/systemd/systemd-udevd
             │ │   └─339919 /usr/lib/systemd/systemd-udevd
             │ ├─tuned.service
             │ │ └─125602 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             │ ├─virtlogd.service
             │ │ └─205249 /usr/sbin/virtlogd
             │ ├─virtnodedevd.service
             │ │ └─229338 /usr/sbin/virtnodedevd --timeout 120
             │ ├─virtqemud.service
             │ │ └─229010 /usr/sbin/virtqemud --timeout 120
             │ └─virtsecretd.service
             │   └─323059 /usr/sbin/virtsecretd --timeout 120
             └─user.slice
               ├─user-1000.slice
               │ ├─session-1.scope
               │ │ └─4336 /usr/bin/python3
               │ ├─session-84.scope
               │ │ ├─331435 "sshd: zuul [priv]"
               │ │ ├─331438 "sshd: zuul@notty"
               │ │ ├─331439 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
               │ │ ├─331456 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
               │ │ ├─340111 timeout 300s systemctl status --all
               │ │ ├─340118 systemctl status --all
               │ │ ├─340176 timeout 300s xfs_admin -l -u /dev/vda4
               │ │ ├─340177 /usr/bin/sh -f /usr/sbin/xfs_admin -l -u /dev/vda4
               │ │ └─340178 xfs_db -x -p xfs_admin -r -c label -r -c uuid /dev/vda4
               │ └─user@1000.service
               │   ├─app.slice
               │   │ └─dbus-broker.service
               │   │   ├─14084 /usr/bin/dbus-broker-launch --scope user
               │   │   └─14097 dbus-broker --log 4 --controller 9 --machine-id c0212a8b024a111cfc61293864f36c87 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               │   ├─init.scope
               │   │ ├─4178 /usr/lib/systemd/systemd --user
               │   │ └─4180 "(sd-pam)"
               │   └─user.slice
               │     └─podman-pause-de66dbe9.scope
               │       └─13932 podman
               └─user-1002.slice
                 ├─session-71.scope
                 │ ├─303883 "sshd: ceph-admin [priv]"
                 │ └─303886 "sshd: ceph-admin@notty"
                 └─user@1002.service
                   └─init.scope
                     ├─26358 /usr/lib/systemd/systemd --user
                     └─26360 "(sd-pam)"

Feb 23 10:10:44 np0005626466.localdomain systemd[1]: c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8.service: Deactivated successfully.
Feb 23 10:10:55 np0005626466.localdomain systemd[1]: Started /usr/bin/podman healthcheck run 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e.
Feb 23 10:10:55 np0005626466.localdomain systemd[1]: tmp-crun.WuJvtK.mount: Deactivated successfully.
Feb 23 10:10:55 np0005626466.localdomain systemd[1]: 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e.service: Deactivated successfully.
Feb 23 10:10:56 np0005626466.localdomain systemd[1]: Started /usr/bin/podman healthcheck run 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4.
Feb 23 10:10:56 np0005626466.localdomain systemd[1]: Started /usr/bin/podman healthcheck run fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9.
Feb 23 10:10:56 np0005626466.localdomain systemd[1]: fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9.service: Deactivated successfully.
Feb 23 10:10:56 np0005626466.localdomain systemd[1]: 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4.service: Deactivated successfully.
Feb 23 10:11:02 np0005626466.localdomain systemd[1]: Started /usr/bin/podman healthcheck run 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e.
Feb 23 10:11:02 np0005626466.localdomain systemd[1]: 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e.service: Deactivated successfully.

● machine.slice - Virtual Machine and Container Slice
     Loaded: loaded (/usr/lib/systemd/system/machine.slice; static)
     Active: active since Mon 2026-02-23 07:38:08 UTC; 2h 32min ago
      Until: Mon 2026-02-23 07:38:08 UTC; 2h 32min ago
       Docs: man:systemd.special(7)
         IO: 4.0K read, 3.9M written
      Tasks: 102
     Memory: 1.6G
        CPU: 9min 50.629s
     CGroup: /machine.slice
             ├─libpod-122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4.scope
             │ └─container
             │   ├─156068 dumb-init --single-child -- kolla_start
             │   └─156071 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock
             ├─libpod-2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79.scope
             │ └─container
             │   ├─280694 dumb-init --single-child -- kolla_start
             │   ├─280697 /usr/bin/python3 /usr/bin/nova-compute
             │   ├─307841 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmpqn5dfyp_/privsep.sock
             │   └─324666 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmpa7k05g5b/privsep.sock
             ├─libpod-3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb.scope
             │ └─container
             │   ├─256427 dumb-init --single-child -- kolla_start
             │   └─256429 "neutron-sriov-nic-agent (/usr/bin/python3 /usr/bin/neutron-sriov-nic-agent)"
             ├─libpod-6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e.scope
             │ └─container
             │   └─243612 /app/openstack-network-exporter
             ├─libpod-8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c.scope
             │ └─container
             │   ├─264101 dumb-init --single-child -- /usr/sbin/dnsmasq -k --no-hosts --no-resolv --pid-file=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/pid --dhcp-hostsfile=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/host --addn-hosts=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/addn_hosts --dhcp-optsfile=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/opts --dhcp-leasefile=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/leases --dhcp-match=set:ipxe,175 --dhcp-userclass=set:ipxe6,iPXE --local-service --bind-dynamic --dhcp-range=set:subnet-21d77760-119f-4466-bc85-a0e9167487a9,192.168.122.0,static,255.255.255.0,86400s --dhcp-option-force=option:mtu,1350 --dhcp-lease-max=256 --conf-file=/dev/null --domain=openstacklocal
             │   └─264111 /usr/sbin/dnsmasq -k --no-hosts --no-resolv --pid-file=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/pid --dhcp-hostsfile=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/host --addn-hosts=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/addn_hosts --dhcp-optsfile=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/opts --dhcp-leasefile=/var/lib/neutron/dhcp/91fdc6a7-b901-4255-83f7-4b37365658a3/leases --dhcp-match=set:ipxe,175 --dhcp-userclass=set:ipxe6,iPXE --local-service --bind-dynamic --dhcp-range=set:subnet-21d77760-119f-4466-bc85-a0e9167487a9,192.168.122.0,static,255.255.255.0,86400s --dhcp-option-force=option:mtu,1350 --dhcp-lease-max=256 --conf-file=/dev/null --domain=openstacklocal
             ├─libpod-8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e.scope
             │ └─container
             │   ├─161941 dumb-init --single-child -- kolla_start
             │   ├─161944 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │   ├─162130 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │   ├─162175 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp84iwktpn/privsep.sock
             │   ├─264002 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpbtevuisl/privsep.sock
             │   └─308309 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpkfen9wub/privsep.sock
             ├─libpod-a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41.scope
             │ └─container
             │   ├─264098 dumb-init --single-child -- /usr/sbin/dnsmasq -k --no-hosts --no-resolv --pid-file=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/pid --dhcp-hostsfile=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/host --addn-hosts=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/addn_hosts --dhcp-optsfile=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/opts --dhcp-leasefile=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/leases --dhcp-match=set:ipxe,175 --dhcp-userclass=set:ipxe6,iPXE --local-service --bind-dynamic --dhcp-range=set:subnet-33e5fe9f-f246-45b2-86ae-3c10c0318cd7,192.168.0.0,static,255.255.255.0,86400s --dhcp-option-force=option:mtu,1292 --dhcp-lease-max=256 --conf-file=/dev/null --domain=openstacklocal
             │   └─264104 /usr/sbin/dnsmasq -k --no-hosts --no-resolv --pid-file=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/pid --dhcp-hostsfile=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/host --addn-hosts=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/addn_hosts --dhcp-optsfile=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/opts --dhcp-leasefile=/var/lib/neutron/dhcp/9da5b53d-3184-450f-9a5b-bdba1a6c9f6d/leases --dhcp-match=set:ipxe,175 --dhcp-userclass=set:ipxe6,iPXE --local-service --bind-dynamic --dhcp-range=set:subnet-33e5fe9f-f246-45b2-86ae-3c10c0318cd7,192.168.0.0,static,255.255.255.0,86400s --dhcp-option-force=option:mtu,1292 --dhcp-lease-max=256 --conf-file=/dev/null --domain=openstacklocal
             ├─libpod-bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d.scope
             │ └─container
             │   ├─263821 dumb-init --single-child -- kolla_start
             │   ├─263823 "neutron-dhcp-agent (/usr/bin/python3 /usr/bin/neutron-dhcp-agent)"
             │   ├─263857 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmpg0yua8_o/privsep.sock
             │   ├─263887 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp7urwfrmj/privsep.sock
             │   ├─263903 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmpopxbn0rr/privsep.sock
             │   └─315526 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.dhcp_release_cmd --privsep_sock_path /tmp/tmp02asih0a/privsep.sock
             ├─libpod-c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8.scope
             │ └─container
             │   └─241166 /bin/podman_exporter
             ├─libpod-cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52.scope
             │ └─container
             │   └─238755 /bin/node_exporter --web.disable-exporter-metrics --collector.systemd "--collector.systemd.unit-include=(edpm_.*|ovs.*|openvswitch|virt.*|rsyslog)\\.service" --no-collector.dmi --no-collector.entropy --no-collector.thermal_zone --no-collector.time --no-collector.timex --no-collector.uname --no-collector.stat --no-collector.hwmon --no-collector.os --no-collector.selinux --no-collector.textfile --no-collector.powersupplyclass --no-collector.pressure --no-collector.rapl --path.rootfs=/rootfs
             ├─libpod-conmon-8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c.scope
             │ └─264099 /usr/bin/conmon --api-version 1 -c 8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c -u 8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c/userdata -p /run/containers/storage/overlay-containers/8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c/userdata/pidfile -n neutron-dnsmasq-qdhcp-91fdc6a7-b901-4255-83f7-4b37365658a3 --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c/userdata/oci-log --conmon-pidfile /run/containers/storage/overlay-containers/8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c/userdata/conmon.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 8813b336059ca31aa5e43a3a140b1dec6d9019f75a06f121cdebad271f8eb62c
             ├─libpod-conmon-a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41.scope
             │ └─264095 /usr/bin/conmon --api-version 1 -c a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41 -u a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41/userdata -p /run/containers/storage/overlay-containers/a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41/userdata/pidfile -n neutron-dnsmasq-qdhcp-9da5b53d-3184-450f-9a5b-bdba1a6c9f6d --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41/userdata/oci-log --conmon-pidfile /run/containers/storage/overlay-containers/a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41/userdata/conmon.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg a43c45687bee0664320d95d80d3e7a94940f1f0a4aba277a159d8b3eeefd4d41
             └─libpod-fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9.scope
               └─container
                 ├─236491 dumb-init --single-child -- kolla_start
                 ├─236494 "ceilometer-polling: master process [/usr/bin/ceilometer-polling --polling-namespaces compute --logfile /dev/stdout]"
                 └─236558 "ceilometer-polling: AgentManager worker(0)"

Feb 23 10:10:41 np0005626466.localdomain pensive_robinson[333915]:             "sectors": 0,
Feb 23 10:10:41 np0005626466.localdomain pensive_robinson[333915]:             "sectorsize": "2048",
Feb 23 10:10:41 np0005626466.localdomain pensive_robinson[333915]:             "size": 493568.0,
Feb 23 10:10:41 np0005626466.localdomain pensive_robinson[333915]:             "support_discard": "0",
Feb 23 10:10:41 np0005626466.localdomain pensive_robinson[333915]:             "type": "disk",
Feb 23 10:10:41 np0005626466.localdomain pensive_robinson[333915]:             "vendor": "QEMU"
Feb 23 10:10:41 np0005626466.localdomain pensive_robinson[333915]:         }
Feb 23 10:10:41 np0005626466.localdomain pensive_robinson[333915]:     }
Feb 23 10:10:41 np0005626466.localdomain pensive_robinson[333915]: ]
Feb 23 10:10:41 np0005626466.localdomain podman[335895]: 2026-02-23 10:10:41.182719315 +0000 UTC m=+0.097224461 container remove 7e66abf3937775ffe686996113c815576940c8beda8502961d82d4fd25f7de64 (image=registry.redhat.io/rhceph/rhceph-7-rhel9:latest, name=pensive_robinson, io.buildah.version=1.42.2, vendor=Red Hat, Inc., io.k8s.description=Red Hat Ceph Storage 7, GIT_CLEAN=True, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vcs-ref=b3986a21dbd047e1edac0f24f7c0e811518e5b14, release=1770267347, distribution-scope=public, io.openshift.expose-services=, cpe=cpe:/a:redhat:enterprise_linux:9::appstream, maintainer=Guillaume Abrioux <gabrioux@redhat.com>, description=Red Hat Ceph Storage 7, com.redhat.component=rhceph-container, RELEASE=main, ceph=True, summary=Provides the latest Red Hat Ceph Storage 7 on RHEL 9 in a fully featured and supported base image., build-date=2026-02-09T10:25:24Z, io.k8s.display-name=Red Hat Ceph Storage 7 on RHEL 9, architecture=x86_64, io.openshift.tags=rhceph ceph, GIT_COMMIT=12717c0777377369ea674892da98b0d85250f5b0, GIT_BRANCH=main, version=7, org.opencontainers.image.revision=b3986a21dbd047e1edac0f24f7c0e811518e5b14, url=https://catalog.redhat.com/en/search?searchType=containers, org.opencontainers.image.created=2026-02-09T10:25:24Z, name=rhceph, CEPH_POINT_RELEASE=, vcs-type=git, GIT_REPO=https://github.com/ceph/ceph-container.git)

● system-ceph\x2df1fea371\x2dcb69\x2d578d\x2da3d0\x2db5c472a84b46.slice - Slice /system/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46
     Loaded: loaded
     Active: active since Mon 2026-02-23 07:38:40 UTC; 2h 32min ago
      Until: Mon 2026-02-23 07:38:40 UTC; 2h 32min ago
         IO: 0B read, 369.1M written
      Tasks: 191
     Memory: 2.2G
        CPU: 3min 46.259s
     CGroup: /system.slice/system-ceph\x2df1fea371\x2dcb69\x2d578d\x2da3d0\x2db5c472a84b46.slice
             ├─ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@crash.np0005626466.service
             │ ├─libpod-payload-975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e
             │ │ ├─29138 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.np0005626466
             │ │ └─29140 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.np0005626466
             │ └─runtime
             │   └─29136 /usr/bin/conmon --api-version 1 -c 975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e -u 975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e/userdata -p /run/containers/storage/overlay-containers/975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-crash-np0005626466 --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@crash.np0005626466.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e
             ├─ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mds.mds.np0005626466.vaywlp.service
             │ ├─libpod-payload-13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf
             │ │ ├─285095 /run/podman-init -- /usr/bin/ceph-mds -n mds.mds.np0005626466.vaywlp -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─285097 /usr/bin/ceph-mds -n mds.mds.np0005626466.vaywlp -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─285093 /usr/bin/conmon --api-version 1 -c 13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf -u 13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf/userdata -p /run/containers/storage/overlay-containers/13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-mds-mds-np0005626466-vaywlp --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mds.mds.np0005626466.vaywlp.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf
             ├─ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mgr.np0005626466.nisqfq.service
             │ ├─libpod-payload-9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf
             │ │ ├─286340 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.np0005626466.nisqfq -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─286342 /usr/bin/ceph-mgr -n mgr.np0005626466.nisqfq -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─286338 /usr/bin/conmon --api-version 1 -c 9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf -u 9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf/userdata -p /run/containers/storage/overlay-containers/9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-mgr-np0005626466-nisqfq --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mgr.np0005626466.nisqfq.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf
             ├─ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mon.np0005626466.service
             │ ├─libpod-payload-2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a
             │ │ ├─300839 /run/podman-init -- /usr/bin/ceph-mon -n mon.np0005626466 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─300841 /usr/bin/ceph-mon -n mon.np0005626466 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─runtime
             │   └─300837 /usr/bin/conmon --api-version 1 -c 2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a -u 2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a/userdata -p /run/containers/storage/overlay-containers/2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-mon-np0005626466 --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mon.np0005626466.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a
             ├─ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@osd.1.service
             │ ├─libpod-payload-f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b
             │ │ ├─31871 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─31873 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─31869 /usr/bin/conmon --api-version 1 -c f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b -u f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b/userdata -p /run/containers/storage/overlay-containers/f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-osd-1 --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b
             └─ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@osd.4.service
               ├─libpod-payload-149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e
               │ ├─32811 /run/podman-init -- /usr/bin/ceph-osd -n osd.4 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
               │ └─32813 /usr/bin/ceph-osd -n osd.4 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
               └─runtime
                 └─32809 /usr/bin/conmon --api-version 1 -c 149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e -u 149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e/userdata -p /run/containers/storage/overlay-containers/149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-osd-4 --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@osd.4.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e

Feb 23 10:11:02 np0005626466.localdomain ceph-mon[300841]: from='client.? 172.18.0.107:0/3166253469' entity='client.admin' cmd={"prefix": "osd stat", "format": "json-pretty"} : dispatch
Feb 23 10:11:02 np0005626466.localdomain ceph-mon[300841]: from='client.? 172.18.0.108:0/785348751' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch
Feb 23 10:11:02 np0005626466.localdomain ceph-mon[300841]: mon.np0005626466@2(peon) e15 handle_command mon_command({"prefix": "time-sync-status", "format": "json-pretty"} v 0)
Feb 23 10:11:02 np0005626466.localdomain ceph-mon[300841]: log_channel(audit) log [DBG] : from='client.? 172.18.0.108:0/252287053' entity='client.admin' cmd={"prefix": "time-sync-status", "format": "json-pretty"} : dispatch
Feb 23 10:11:03 np0005626466.localdomain ceph-mon[300841]: from='client.70112 -' entity='client.admin' cmd=[{"prefix": "pg stat", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Feb 23 10:11:03 np0005626466.localdomain ceph-mon[300841]: from='client.50166 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Feb 23 10:11:03 np0005626466.localdomain ceph-mon[300841]: from='client.59791 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Feb 23 10:11:03 np0005626466.localdomain ceph-mon[300841]: from='client.? 172.18.0.108:0/252287053' entity='client.admin' cmd={"prefix": "time-sync-status", "format": "json-pretty"} : dispatch
Feb 23 10:11:03 np0005626466.localdomain ceph-mon[300841]: from='client.? 172.18.0.107:0/4193047412' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch
Feb 23 10:11:03 np0005626466.localdomain ceph-mon[300841]: from='client.? 172.18.0.106:0/3750201930' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch

● system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice - Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged
     Loaded: loaded
     Active: active since Mon 2026-02-23 09:24:06 UTC; 46min ago
      Until: Mon 2026-02-23 09:24:06 UTC; 46min ago
         IO: 4.0K read, 0B written
      Tasks: 0
     Memory: 4.0K
        CPU: 1.905s
     CGroup: /system.slice/system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice

Feb 23 09:24:06 np0005626466.localdomain systemd[1]: Created slice Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged.

● system-getty.slice - Slice /system/getty
     Loaded: loaded
     Active: active since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
         IO: 4.0K read, 0B written
      Tasks: 1
     Memory: 232.0K
        CPU: 7ms
     CGroup: /system.slice/system-getty.slice
             └─getty@tty1.service
               └─1140 /sbin/agetty -o "-p -- \\u" --noclear - linux

● system-modprobe.slice - Slice /system/modprobe
     Loaded: loaded
     Active: active since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
         IO: 432.0K read, 0B written
      Tasks: 0
     Memory: 120.0K
        CPU: 184ms
     CGroup: /system.slice/system-modprobe.slice

● system-serial\x2dgetty.slice - Slice /system/serial-getty
     Loaded: loaded
     Active: active since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
         IO: 60.0K read, 0B written
      Tasks: 1
     Memory: 300.0K
        CPU: 11ms
     CGroup: /system.slice/system-serial\x2dgetty.slice
             └─serial-getty@ttyS0.service
               └─1142 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

● system-sshd\x2dkeygen.slice - Slice /system/sshd-keygen
     Loaded: loaded
     Active: active since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 0B
        CPU: 0
     CGroup: /system.slice/system-sshd\x2dkeygen.slice

● system-systemd\x2dcoredump.slice - Slice /system/systemd-coredump
     Loaded: loaded
     Active: active since Mon 2026-02-23 09:01:07 UTC; 1h 9min ago
      Until: Mon 2026-02-23 09:01:07 UTC; 1h 9min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 4.0K
        CPU: 115ms
     CGroup: /system.slice/system-systemd\x2dcoredump.slice

Feb 23 09:01:07 np0005626466.localdomain systemd[1]: Created slice Slice /system/systemd-coredump.
Feb 23 09:01:07 np0005626466.localdomain systemd-coredump[109126]: Resource limits disable core dumping for process 54560 (qdrouterd).
Feb 23 09:01:07 np0005626466.localdomain systemd-coredump[109126]: Process 54560 (qdrouterd) of user 42465 dumped core.

● system-systemd\x2dfsck.slice - Slice /system/systemd-fsck
     Loaded: loaded
     Active: active since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
         IO: 2.5M read, 0B written
      Tasks: 0
     Memory: 16.0K
        CPU: 36ms
     CGroup: /system.slice/system-systemd\x2dfsck.slice

● system.slice - System Slice
     Loaded: loaded
     Active: active since Mon 2026-02-23 06:36:00 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:00 UTC; 3h 35min ago
       Docs: man:systemd.special(7)
         IO: 307.7M read, 255.2M written
      Tasks: 351
     Memory: 3.2G
        CPU: 21min 9.309s
     CGroup: /system.slice
             ├─NetworkManager.service
             │ └─5981 /usr/sbin/NetworkManager --no-daemon
             ├─auditd.service
             │ ├─726 /sbin/auditd
             │ └─728 /usr/sbin/sedispatch
             ├─chronyd.service
             │ └─139217 /usr/sbin/chronyd -F 2
             ├─crond.service
             │ └─1139 /usr/sbin/crond -n
             ├─dbus-broker.service
             │ ├─751 /usr/bin/dbus-broker-launch --scope system --audit
             │ └─755 dbus-broker --log 4 --controller 9 --machine-id c0212a8b024a111cfc61293864f36c87 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             ├─edpm_ceilometer_agent_compute.service
             │ └─236489 /usr/bin/conmon --api-version 1 -c fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9 -u fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9/userdata -p /run/containers/storage/overlay-containers/fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9/userdata/pidfile -n ceilometer_agent_compute --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9/userdata/oci-log --conmon-pidfile /run/ceilometer_agent_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9
             ├─edpm_neutron_dhcp_agent.service
             │ └─263819 /usr/bin/conmon --api-version 1 -c bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d -u bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d/userdata -p /run/containers/storage/overlay-containers/bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d/userdata/pidfile -n neutron_dhcp_agent --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d/userdata/oci-log --conmon-pidfile /run/neutron_dhcp_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg bdeaed09d67af00ebcf3af49678b8d850a09c9b8240ebddb072ba501276f201d
             ├─edpm_neutron_sriov_agent.service
             │ └─256425 /usr/bin/conmon --api-version 1 -c 3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb -u 3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb/userdata -p /run/containers/storage/overlay-containers/3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb/userdata/pidfile -n neutron_sriov_agent --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb/userdata/oci-log --conmon-pidfile /run/neutron_sriov_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 3648631d61f37afb17aa928ef74b239e7715b60e5cab331e4a494567755448eb
             ├─edpm_node_exporter.service
             │ └─238753 /usr/bin/conmon --api-version 1 -c cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52 -u cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52/userdata -p /run/containers/storage/overlay-containers/cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52/userdata/pidfile -n node_exporter --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52/userdata/oci-log --conmon-pidfile /run/node_exporter.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52
             ├─edpm_nova_compute.service
             │ └─280690 /usr/bin/conmon --api-version 1 -c 2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79 -u 2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79/userdata -p /run/containers/storage/overlay-containers/2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 2812510312ec1c5f807144229711d7908bd35d002349ffc7e5627b311d4edf79
             ├─edpm_openstack_network_exporter.service
             │ └─243610 /usr/bin/conmon --api-version 1 -c 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e -u 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e/userdata -p /run/containers/storage/overlay-containers/6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e/userdata/pidfile -n openstack_network_exporter --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e/userdata/oci-log --conmon-pidfile /run/openstack_network_exporter.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e
             ├─edpm_ovn_controller.service
             │ └─156066 /usr/bin/conmon --api-version 1 -c 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4 -u 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4/userdata -p /run/containers/storage/overlay-containers/122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4
             ├─edpm_ovn_metadata_agent.service
             │ └─161939 /usr/bin/conmon --api-version 1 -c 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e -u 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e/userdata -p /run/containers/storage/overlay-containers/8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e
             ├─edpm_podman_exporter.service
             │ └─241164 /usr/bin/conmon --api-version 1 -c c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8 -u c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8/userdata -p /run/containers/storage/overlay-containers/c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8/userdata/pidfile -n podman_exporter --exit-dir /run/libpod/exits --full-attach -s -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8/userdata/oci-log --conmon-pidfile /run/podman_exporter.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8
             ├─gssproxy.service
             │ └─802 /usr/sbin/gssproxy -D
             ├─irqbalance.service
             │ └─758 /usr/sbin/irqbalance --foreground
             ├─iscsid.service
             │ └─212770 /usr/sbin/iscsid -f
             ├─multipathd.service
             │ └─218083 /sbin/multipathd -d -s
             ├─ovs-vswitchd.service
             │ └─22519 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             ├─ovsdb-server.service
             │ └─22437 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             ├─podman.service
             │ └─241175 /usr/bin/podman --log-level=info system service
             ├─polkit.service
             │ └─1036 /usr/lib/polkit-1/polkitd --no-debug
             ├─rhsm.service
             │ └─6643 /usr/bin/python3 /usr/libexec/rhsm-service
             ├─rhsmcertd.service
             │ └─795 /usr/bin/rhsmcertd
             ├─rpcbind.service
             │ └─724 /usr/bin/rpcbind -w -f
             ├─rsyslog.service
             │ └─759 /usr/sbin/rsyslogd -n
             ├─snmpd.service
             │ └─67626 /usr/sbin/snmpd -LS0-5d -f
             ├─sshd.service
             │ └─186135 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             ├─system-ceph\x2df1fea371\x2dcb69\x2d578d\x2da3d0\x2db5c472a84b46.slice
             │ ├─ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@crash.np0005626466.service
             │ │ ├─libpod-payload-975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e
             │ │ │ ├─29138 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.np0005626466
             │ │ │ └─29140 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.np0005626466
             │ │ └─runtime
             │ │   └─29136 /usr/bin/conmon --api-version 1 -c 975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e -u 975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e/userdata -p /run/containers/storage/overlay-containers/975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-crash-np0005626466 --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@crash.np0005626466.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 975e762c1132156a568538ebdacd7315274fa978e3e6da81d658e2351a66b71e
             │ ├─ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mds.mds.np0005626466.vaywlp.service
             │ │ ├─libpod-payload-13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf
             │ │ │ ├─285095 /run/podman-init -- /usr/bin/ceph-mds -n mds.mds.np0005626466.vaywlp -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─285097 /usr/bin/ceph-mds -n mds.mds.np0005626466.vaywlp -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─285093 /usr/bin/conmon --api-version 1 -c 13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf -u 13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf/userdata -p /run/containers/storage/overlay-containers/13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-mds-mds-np0005626466-vaywlp --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mds.mds.np0005626466.vaywlp.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 13d86d0fc7f1279ceb33114723f84eb57a13a8060cd7e1f6fc0b04a1a71bdedf
             │ ├─ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mgr.np0005626466.nisqfq.service
             │ │ ├─libpod-payload-9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf
             │ │ │ ├─286340 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.np0005626466.nisqfq -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─286342 /usr/bin/ceph-mgr -n mgr.np0005626466.nisqfq -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─286338 /usr/bin/conmon --api-version 1 -c 9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf -u 9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf/userdata -p /run/containers/storage/overlay-containers/9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-mgr-np0005626466-nisqfq --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mgr.np0005626466.nisqfq.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 9f7e89f74412aa4d39adf99ebe74e72087671d5f3c70fe114ec0db57835e5bcf
             │ ├─ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mon.np0005626466.service
             │ │ ├─libpod-payload-2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a
             │ │ │ ├─300839 /run/podman-init -- /usr/bin/ceph-mon -n mon.np0005626466 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─300841 /usr/bin/ceph-mon -n mon.np0005626466 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─runtime
             │ │   └─300837 /usr/bin/conmon --api-version 1 -c 2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a -u 2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a/userdata -p /run/containers/storage/overlay-containers/2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-mon-np0005626466 --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@mon.np0005626466.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 2ceecf5aca3e1844048535c9c84c12fc98d118507e171300ca4a76145722e20a
             │ ├─ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@osd.1.service
             │ │ ├─libpod-payload-f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b
             │ │ │ ├─31871 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─31873 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─31869 /usr/bin/conmon --api-version 1 -c f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b -u f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b/userdata -p /run/containers/storage/overlay-containers/f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-osd-1 --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg f272037b1a08b3104e31c243e058b49b6cfe9c3898252c2d34ed954af84eea3b
             │ └─ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@osd.4.service
             │   ├─libpod-payload-149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e
             │   │ ├─32811 /run/podman-init -- /usr/bin/ceph-osd -n osd.4 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │   │ └─32813 /usr/bin/ceph-osd -n osd.4 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │   └─runtime
             │     └─32809 /usr/bin/conmon --api-version 1 -c 149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e -u 149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e/userdata -p /run/containers/storage/overlay-containers/149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e/userdata/pidfile -n ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46-osd-4 --exit-dir /run/libpod/exits --full-attach -l journald --log-level warning --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e/userdata/oci-log --conmon-pidfile /run/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46@osd.4.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --rm --exit-command-arg 149ab20d951c6d638af0c8d29ce496a98dac476b4b563eca8217acb3595a760e
             ├─system-getty.slice
             │ └─getty@tty1.service
             │   └─1140 /sbin/agetty -o "-p -- \\u" --noclear - linux
             ├─system-serial\x2dgetty.slice
             │ └─serial-getty@ttyS0.service
             │   └─1142 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             ├─systemd-hostnamed.service
             │ └─334843 /usr/lib/systemd/systemd-hostnamed
             ├─systemd-journald.service
             │ └─47881 /usr/lib/systemd/systemd-journald
             ├─systemd-logind.service
             │ └─760 /usr/lib/systemd/systemd-logind
             ├─systemd-machined.service
             │ └─205757 /usr/lib/systemd/systemd-machined
             ├─systemd-udevd.service
             │ └─udev
             │   ├─ 47889 /usr/lib/systemd/systemd-udevd
             │   └─339919 /usr/lib/systemd/systemd-udevd
             ├─tuned.service
             │ └─125602 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             ├─virtlogd.service
             │ └─205249 /usr/sbin/virtlogd
             ├─virtnodedevd.service
             │ └─229338 /usr/sbin/virtnodedevd --timeout 120
             ├─virtqemud.service
             │ └─229010 /usr/sbin/virtqemud --timeout 120
             └─virtsecretd.service
               └─323059 /usr/sbin/virtsecretd --timeout 120

Feb 23 10:10:59 np0005626466.localdomain openstack_network_exporter[243610]: 
Feb 23 10:11:01 np0005626466.localdomain nova_compute[280690]: 2026-02-23 10:11:01.680 280697 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] 4997-ms timeout __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:248[00m
Feb 23 10:11:01 np0005626466.localdomain nova_compute[280690]: 2026-02-23 10:11:01.682 280697 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 26 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 23 10:11:01 np0005626466.localdomain nova_compute[280690]: 2026-02-23 10:11:01.682 280697 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] tcp:127.0.0.1:6640: idle 5002 ms, sending inactivity probe run /usr/lib64/python3.9/site-packages/ovs/reconnect.py:117[00m
Feb 23 10:11:01 np0005626466.localdomain nova_compute[280690]: 2026-02-23 10:11:01.682 280697 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] tcp:127.0.0.1:6640: entering IDLE _transition /usr/lib64/python3.9/site-packages/ovs/reconnect.py:519[00m
Feb 23 10:11:01 np0005626466.localdomain nova_compute[280690]: 2026-02-23 10:11:01.682 280697 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] tcp:127.0.0.1:6640: entering ACTIVE _transition /usr/lib64/python3.9/site-packages/ovs/reconnect.py:519[00m
Feb 23 10:11:01 np0005626466.localdomain nova_compute[280690]: 2026-02-23 10:11:01.684 280697 DEBUG ovsdbapp.backend.ovs_idl.vlog [-] [POLLIN] on fd 26 __log_wakeup /usr/lib64/python3.9/site-packages/ovs/poller.py:263[00m
Feb 23 10:11:02 np0005626466.localdomain podman[340121]: 2026-02-23 10:11:02.876554167 +0000 UTC m=+0.109551703 container health_status 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, managed_by=edpm_ansible, org.label-schema.license=GPLv2, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, io.buildah.version=1.43.0, org.label-schema.schema-version=1.0, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, tcib_build_tag=8419493e1fd846703d277695e03fc5eb, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': 'a07a4436e1d1ca1c6231f11309616a78b1ad9830450b5c2d2fc3fb113cfbf838-df122b180261157f1de1391083b3d8abac306e2f12893ac7b9291feafc874311'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/openstack/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20260216)
Feb 23 10:11:02 np0005626466.localdomain podman[340121]: 2026-02-23 10:11:02.889159968 +0000 UTC m=+0.122157544 container exec_died 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, tcib_managed=true, config_id=ovn_metadata_agent, container_name=ovn_metadata_agent, io.buildah.version=1.43.0, org.label-schema.build-date=20260216, org.label-schema.license=GPLv2, tcib_build_tag=8419493e1fd846703d277695e03fc5eb, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': 'a07a4436e1d1ca1c6231f11309616a78b1ad9830450b5c2d2fc3fb113cfbf838-df122b180261157f1de1391083b3d8abac306e2f12893ac7b9291feafc874311'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/openstack/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, maintainer=OpenStack Kubernetes Operator team, managed_by=edpm_ansible)
Feb 23 10:11:03 np0005626466.localdomain virtqemud[229010]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory

● user-1000.slice - User Slice of UID 1000
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Mon 2026-02-23 06:36:32 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:32 UTC; 3h 34min ago
       Docs: man:user@.service(5)
         IO: 264.9M read, 4.1G written
      Tasks: 22 (limit: 41341)
     Memory: 5.2G
        CPU: 25min 34.521s
     CGroup: /user.slice/user-1000.slice
             ├─session-1.scope
             │ └─4336 /usr/bin/python3
             ├─session-84.scope
             │ ├─331435 "sshd: zuul [priv]"
             │ ├─331438 "sshd: zuul@notty"
             │ ├─331439 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ ├─331456 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ ├─340111 timeout 300s systemctl status --all
             │ ├─340118 systemctl status --all
             │ ├─340176 timeout 300s xfs_admin -l -u /dev/vda4
             │ ├─340177 /usr/bin/sh -f /usr/sbin/xfs_admin -l -u /dev/vda4
             │ └─340178 xfs_db -x -p xfs_admin -r -c label -r -c uuid /dev/vda4
             └─user@1000.service
               ├─app.slice
               │ └─dbus-broker.service
               │   ├─14084 /usr/bin/dbus-broker-launch --scope user
               │   └─14097 dbus-broker --log 4 --controller 9 --machine-id c0212a8b024a111cfc61293864f36c87 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               ├─init.scope
               │ ├─4178 /usr/lib/systemd/systemd --user
               │ └─4180 "(sd-pam)"
               └─user.slice
                 └─podman-pause-de66dbe9.scope
                   └─13932 podman

Feb 23 10:09:29 np0005626466.localdomain sshd[331063]: Disconnected from user zuul 38.102.83.114 port 40474
Feb 23 10:09:29 np0005626466.localdomain sudo[331086]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/rsync --server --sender -lLogDtprze.LsfxC . /home/zuul/ansible_hostname
Feb 23 10:09:29 np0005626466.localdomain sudo[331086]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=1000)
Feb 23 10:09:29 np0005626466.localdomain sudo[331086]: pam_unix(sudo:session): session closed for user root
Feb 23 10:09:29 np0005626466.localdomain sshd[331085]: Received disconnect from 38.102.83.114 port 40480:11: disconnected by user
Feb 23 10:09:29 np0005626466.localdomain sshd[331085]: Disconnected from user zuul 38.102.83.114 port 40480
Feb 23 10:10:20 np0005626466.localdomain sudo[331439]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt
Feb 23 10:10:20 np0005626466.localdomain sudo[331439]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=1000)
Feb 23 10:10:26 np0005626466.localdomain ovs-vsctl[331754]: ovs|00001|db_ctl_base|ERR|no key "dpdk-init" in Open_vSwitch record "." column other_config
Feb 23 10:10:50 np0005626466.localdomain ovs-appctl[337647]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

● user-1002.slice - User Slice of UID 1002
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Mon 2026-02-23 07:37:14 UTC; 2h 33min ago
      Until: Mon 2026-02-23 07:37:14 UTC; 2h 33min ago
       Docs: man:user@.service(5)
         IO: 2.1M read, 651.1M written
      Tasks: 4 (limit: 41341)
     Memory: 1.0G
        CPU: 5min 37.548s
     CGroup: /user.slice/user-1002.slice
             ├─session-71.scope
             │ ├─303883 "sshd: ceph-admin [priv]"
             │ └─303886 "sshd: ceph-admin@notty"
             └─user@1002.service
               └─init.scope
                 ├─26358 /usr/lib/systemd/systemd --user
                 └─26360 "(sd-pam)"

Feb 23 10:10:40 np0005626466.localdomain podman[333879]: 2026-02-23 10:10:40.082579696 +0000 UTC m=+0.095411934 container create 7e66abf3937775ffe686996113c815576940c8beda8502961d82d4fd25f7de64 (image=registry.redhat.io/rhceph/rhceph-7-rhel9:latest, name=pensive_robinson, io.openshift.expose-services=, io.openshift.tags=rhceph ceph, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, org.opencontainers.image.revision=b3986a21dbd047e1edac0f24f7c0e811518e5b14, summary=Provides the latest Red Hat Ceph Storage 7 on RHEL 9 in a fully featured and supported base image., GIT_REPO=https://github.com/ceph/ceph-container.git, io.k8s.display-name=Red Hat Ceph Storage 7 on RHEL 9, version=7, name=rhceph, ceph=True, release=1770267347, org.opencontainers.image.created=2026-02-09T10:25:24Z, distribution-scope=public, maintainer=Guillaume Abrioux <gabrioux@redhat.com>, CEPH_POINT_RELEASE=, vendor=Red Hat, Inc., GIT_CLEAN=True, cpe=cpe:/a:redhat:enterprise_linux:9::appstream, architecture=x86_64, GIT_BRANCH=main, description=Red Hat Ceph Storage 7, build-date=2026-02-09T10:25:24Z, io.k8s.description=Red Hat Ceph Storage 7, RELEASE=main, com.redhat.component=rhceph-container, vcs-type=git, GIT_COMMIT=12717c0777377369ea674892da98b0d85250f5b0, url=https://catalog.redhat.com/en/search?searchType=containers, io.buildah.version=1.42.2, vcs-ref=b3986a21dbd047e1edac0f24f7c0e811518e5b14)
Feb 23 10:10:40 np0005626466.localdomain podman[333879]: 2026-02-23 10:10:40.053540795 +0000 UTC m=+0.066373093 image pull  registry.redhat.io/rhceph/rhceph-7-rhel9:latest
Feb 23 10:10:40 np0005626466.localdomain podman[333879]: 2026-02-23 10:10:40.157852304 +0000 UTC m=+0.170684572 container init 7e66abf3937775ffe686996113c815576940c8beda8502961d82d4fd25f7de64 (image=registry.redhat.io/rhceph/rhceph-7-rhel9:latest, name=pensive_robinson, vcs-type=git, CEPH_POINT_RELEASE=, io.k8s.display-name=Red Hat Ceph Storage 7 on RHEL 9, GIT_BRANCH=main, version=7, org.opencontainers.image.created=2026-02-09T10:25:24Z, org.opencontainers.image.revision=b3986a21dbd047e1edac0f24f7c0e811518e5b14, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.component=rhceph-container, vendor=Red Hat, Inc., io.openshift.tags=rhceph ceph, description=Red Hat Ceph Storage 7, io.buildah.version=1.42.2, io.k8s.description=Red Hat Ceph Storage 7, ceph=True, vcs-ref=b3986a21dbd047e1edac0f24f7c0e811518e5b14, maintainer=Guillaume Abrioux <gabrioux@redhat.com>, cpe=cpe:/a:redhat:enterprise_linux:9::appstream, RELEASE=main, architecture=x86_64, summary=Provides the latest Red Hat Ceph Storage 7 on RHEL 9 in a fully featured and supported base image., release=1770267347, GIT_COMMIT=12717c0777377369ea674892da98b0d85250f5b0, io.openshift.expose-services=, distribution-scope=public, build-date=2026-02-09T10:25:24Z, url=https://catalog.redhat.com/en/search?searchType=containers, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, GIT_CLEAN=True, name=rhceph)
Feb 23 10:10:40 np0005626466.localdomain podman[333879]: 2026-02-23 10:10:40.166506923 +0000 UTC m=+0.179339191 container start 7e66abf3937775ffe686996113c815576940c8beda8502961d82d4fd25f7de64 (image=registry.redhat.io/rhceph/rhceph-7-rhel9:latest, name=pensive_robinson, org.opencontainers.image.revision=b3986a21dbd047e1edac0f24f7c0e811518e5b14, distribution-scope=public, release=1770267347, version=7, ceph=True, com.redhat.component=rhceph-container, io.buildah.version=1.42.2, summary=Provides the latest Red Hat Ceph Storage 7 on RHEL 9 in a fully featured and supported base image., GIT_REPO=https://github.com/ceph/ceph-container.git, url=https://catalog.redhat.com/en/search?searchType=containers, vcs-type=git, cpe=cpe:/a:redhat:enterprise_linux:9::appstream, io.openshift.tags=rhceph ceph, description=Red Hat Ceph Storage 7, CEPH_POINT_RELEASE=, vendor=Red Hat, Inc., GIT_COMMIT=12717c0777377369ea674892da98b0d85250f5b0, io.openshift.expose-services=, io.k8s.display-name=Red Hat Ceph Storage 7 on RHEL 9, org.opencontainers.image.created=2026-02-09T10:25:24Z, maintainer=Guillaume Abrioux <gabrioux@redhat.com>, io.k8s.description=Red Hat Ceph Storage 7, name=rhceph, architecture=x86_64, vcs-ref=b3986a21dbd047e1edac0f24f7c0e811518e5b14, RELEASE=main, GIT_BRANCH=main, GIT_CLEAN=True, build-date=2026-02-09T10:25:24Z, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI)
Feb 23 10:10:40 np0005626466.localdomain podman[333879]: 2026-02-23 10:10:40.166842423 +0000 UTC m=+0.179674681 container attach 7e66abf3937775ffe686996113c815576940c8beda8502961d82d4fd25f7de64 (image=registry.redhat.io/rhceph/rhceph-7-rhel9:latest, name=pensive_robinson, org.opencontainers.image.created=2026-02-09T10:25:24Z, CEPH_POINT_RELEASE=, GIT_COMMIT=12717c0777377369ea674892da98b0d85250f5b0, summary=Provides the latest Red Hat Ceph Storage 7 on RHEL 9 in a fully featured and supported base image., url=https://catalog.redhat.com/en/search?searchType=containers, vendor=Red Hat, Inc., description=Red Hat Ceph Storage 7, io.buildah.version=1.42.2, io.openshift.tags=rhceph ceph, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, io.k8s.description=Red Hat Ceph Storage 7, RELEASE=main, architecture=x86_64, GIT_CLEAN=True, name=rhceph, cpe=cpe:/a:redhat:enterprise_linux:9::appstream, build-date=2026-02-09T10:25:24Z, vcs-ref=b3986a21dbd047e1edac0f24f7c0e811518e5b14, maintainer=Guillaume Abrioux <gabrioux@redhat.com>, version=7, vcs-type=git, release=1770267347, GIT_REPO=https://github.com/ceph/ceph-container.git, org.opencontainers.image.revision=b3986a21dbd047e1edac0f24f7c0e811518e5b14, com.redhat.component=rhceph-container, io.openshift.expose-services=, io.k8s.display-name=Red Hat Ceph Storage 7 on RHEL 9, ceph=True, GIT_BRANCH=main)
Feb 23 10:10:41 np0005626466.localdomain podman[333879]: 2026-02-23 10:10:41.07274878 +0000 UTC m=+1.085581008 container died 7e66abf3937775ffe686996113c815576940c8beda8502961d82d4fd25f7de64 (image=registry.redhat.io/rhceph/rhceph-7-rhel9:latest, name=pensive_robinson, release=1770267347, com.redhat.component=rhceph-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.42.2, maintainer=Guillaume Abrioux <gabrioux@redhat.com>, build-date=2026-02-09T10:25:24Z, architecture=x86_64, GIT_COMMIT=12717c0777377369ea674892da98b0d85250f5b0, io.openshift.expose-services=, GIT_BRANCH=main, version=7, cpe=cpe:/a:redhat:enterprise_linux:9::appstream, url=https://catalog.redhat.com/en/search?searchType=containers, io.k8s.display-name=Red Hat Ceph Storage 7 on RHEL 9, distribution-scope=public, CEPH_POINT_RELEASE=, io.openshift.tags=rhceph ceph, org.opencontainers.image.revision=b3986a21dbd047e1edac0f24f7c0e811518e5b14, description=Red Hat Ceph Storage 7, org.opencontainers.image.created=2026-02-09T10:25:24Z, name=rhceph, RELEASE=main, GIT_CLEAN=True, GIT_REPO=https://github.com/ceph/ceph-container.git, vcs-type=git, ceph=True, summary=Provides the latest Red Hat Ceph Storage 7 on RHEL 9 in a fully featured and supported base image., io.k8s.description=Red Hat Ceph Storage 7, vcs-ref=b3986a21dbd047e1edac0f24f7c0e811518e5b14, vendor=Red Hat, Inc.)
Feb 23 10:10:41 np0005626466.localdomain sudo[333626]: pam_unix(sudo:session): session closed for user root
Feb 23 10:10:41 np0005626466.localdomain sudo[335960]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Feb 23 10:10:41 np0005626466.localdomain sudo[335960]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=1002)
Feb 23 10:10:41 np0005626466.localdomain sudo[335960]: pam_unix(sudo:session): session closed for user root

● user.slice - User and Session Slice
     Loaded: loaded (/usr/lib/systemd/system/user.slice; static)
     Active: active since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
       Docs: man:systemd.special(7)
         IO: 256.3M read, 4.7G written
      Tasks: 27
     Memory: 9.5G
        CPU: 38min 35.668s
     CGroup: /user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4336 /usr/bin/python3
             │ ├─session-84.scope
             │ │ ├─331435 "sshd: zuul [priv]"
             │ │ ├─331438 "sshd: zuul@notty"
             │ │ ├─331439 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─331456 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─340111 timeout 300s systemctl status --all
             │ │ ├─340118 systemctl status --all
             │ │ ├─340176 timeout 300s xfs_admin -l -u /dev/vda4
             │ │ ├─340177 /usr/bin/sh -f /usr/sbin/xfs_admin -l -u /dev/vda4
             │ │ ├─340178 xfs_db -x -p xfs_admin -r -c label -r -c uuid /dev/vda4
             │ │ ├─340243 timeout --foreground 300s virsh -r nodedev-dumpxml net_vlan44_7a_25_39_c7_cd_d8
             │ │ └─340244 virsh -r nodedev-dumpxml net_vlan44_7a_25_39_c7_cd_d8
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─14084 /usr/bin/dbus-broker-launch --scope user
             │   │   └─14097 dbus-broker --log 4 --controller 9 --machine-id c0212a8b024a111cfc61293864f36c87 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4178 /usr/lib/systemd/systemd --user
             │   │ └─4180 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-de66dbe9.scope
             │       └─13932 podman
             └─user-1002.slice
               ├─session-71.scope
               │ ├─303883 "sshd: ceph-admin [priv]"
               │ └─303886 "sshd: ceph-admin@notty"
               └─user@1002.service
                 └─init.scope
                   ├─26358 /usr/lib/systemd/systemd --user
                   └─26360 "(sd-pam)"

● dbus.socket - D-Bus System Message Bus Socket
     Loaded: loaded (/usr/lib/systemd/system/dbus.socket; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
   Triggers: ● dbus-broker.service
     Listen: /run/dbus/system_bus_socket (Stream)
     CGroup: /system.slice/dbus.socket

Feb 23 06:36:04 localhost systemd[1]: Listening on D-Bus System Message Bus Socket.

● dm-event.socket - Device-mapper event daemon FIFOs
     Loaded: loaded (/usr/lib/systemd/system/dm-event.socket; enabled; preset: enabled)
     Active: active (listening) since Mon 2026-02-23 07:17:16 UTC; 2h 53min ago
      Until: Mon 2026-02-23 07:17:16 UTC; 2h 53min ago
   Triggers: ● dm-event.service
       Docs: man:dmeventd(8)
     Listen: /run/dmeventd-server (FIFO)
             /run/dmeventd-client (FIFO)
     CGroup: /system.slice/dm-event.socket

Feb 23 07:17:16 np0005626466.localdomain systemd[1]: Listening on Device-mapper event daemon FIFOs.

● iscsid.socket - Open-iSCSI iscsid Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsid.socket; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-23 09:25:17 UTC; 45min ago
      Until: Mon 2026-02-23 09:25:17 UTC; 45min ago
   Triggers: ● iscsid.service
       Docs: man:iscsid(8)
             man:iscsiadm(8)
     Listen: @ISCSIADM_ABSTRACT_NAMESPACE (Stream)
     CGroup: /system.slice/iscsid.socket

Feb 23 09:25:17 np0005626466.localdomain systemd[1]: Listening on Open-iSCSI iscsid Socket.

○ iscsiuio.socket - Open-iSCSI iscsiuio Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● iscsiuio.service
       Docs: man:iscsiuio(8)
     Listen: @ISCSID_UIP_ABSTRACT_NAMESPACE (Stream)

○ libvirtd-admin.socket - libvirt legacy monolithic daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-admin.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)

○ libvirtd-ro.socket - libvirt legacy monolithic daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-ro.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)

○ libvirtd.socket - libvirt legacy monolithic daemon socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock (Stream)

● lvm2-lvmpolld.socket - LVM2 poll daemon socket
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.socket; enabled; preset: enabled)
     Active: active (listening) since Mon 2026-02-23 07:17:17 UTC; 2h 53min ago
      Until: Mon 2026-02-23 07:17:17 UTC; 2h 53min ago
   Triggers: ● lvm2-lvmpolld.service
       Docs: man:lvmpolld(8)
     Listen: /run/lvm/lvmpolld.socket (Stream)
     CGroup: /system.slice/lvm2-lvmpolld.socket

Feb 23 07:17:17 np0005626466.localdomain systemd[1]: Listening on LVM2 poll daemon socket.

● multipathd.socket - multipathd control socket
     Loaded: loaded (/usr/lib/systemd/system/multipathd.socket; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 09:25:47 UTC; 45min ago
      Until: Mon 2026-02-23 09:25:47 UTC; 45min ago
   Triggers: ● multipathd.service
     Listen: @/org/kernel/linux/storage/multipathd (Stream)
             /run/multipathd.socket (Stream)
     CGroup: /system.slice/multipathd.socket

Feb 23 09:25:47 np0005626466.localdomain systemd[1]: Listening on multipathd control socket.

● podman.socket - Podman API Socket
     Loaded: loaded (/usr/lib/systemd/system/podman.socket; enabled; preset: disabled)
     Active: active (running) since Mon 2026-02-23 09:28:35 UTC; 42min ago
      Until: Mon 2026-02-23 09:28:35 UTC; 42min ago
   Triggers: ● podman.service
       Docs: man:podman-system-service(1)
     Listen: /run/podman/podman.sock (Stream)
     CGroup: /system.slice/podman.socket

Feb 23 09:28:35 np0005626466.localdomain systemd[1]: Listening on Podman API Socket.

● rpcbind.socket - RPCbind Server Activation Socket
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.socket; enabled; preset: enabled)
     Active: active (running) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
   Triggers: ● rpcbind.service
     Listen: /run/rpcbind.sock (Stream)
             0.0.0.0:111 (Stream)
             0.0.0.0:111 (Datagram)
             [::]:111 (Stream)
             [::]:111 (Datagram)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 100220)
     Memory: 16.0K
        CPU: 3ms
     CGroup: /system.slice/rpcbind.socket

● sssd-kcm.socket - SSSD Kerberos Cache Manager responder socket
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.socket; enabled; preset: enabled)
     Active: active (listening) since Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:04 UTC; 3h 34min ago
   Triggers: ● sssd-kcm.service
       Docs: man:sssd-kcm(8)
     Listen: /run/.heim_org.h5l.kcm-socket (Stream)
     CGroup: /system.slice/sssd-kcm.socket

Feb 23 06:36:04 localhost systemd[1]: Listening on SSSD Kerberos Cache Manager responder socket.

○ syslog.socket - Syslog Socket
     Loaded: loaded (/usr/lib/systemd/system/syslog.socket; static)
     Active: inactive (dead)
   Triggers: ● syslog.service
       Docs: man:systemd.special(7)
             https://www.freedesktop.org/wiki/Software/systemd/syslog
     Listen: /run/systemd/journal/syslog (Datagram)

● systemd-coredump.socket - Process Core Dump Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-coredump.socket; static)
     Active: active (listening) since Mon 2026-02-23 07:38:10 UTC; 2h 32min ago
      Until: Mon 2026-02-23 07:38:10 UTC; 2h 32min ago
       Docs: man:systemd-coredump(8)
     Listen: /run/systemd/coredump (SequentialPacket)
   Accepted: 1; Connected: 0;
     CGroup: /system.slice/systemd-coredump.socket

Feb 23 07:38:10 np0005626466.localdomain systemd[1]: Listening on Process Core Dump Socket.

● systemd-initctl.socket - initctl Compatibility Named Pipe
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.socket; static)
     Active: active (listening) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
   Triggers: ● systemd-initctl.service
       Docs: man:systemd-initctl.socket(8)
     Listen: /run/initctl (FIFO)
     CGroup: /system.slice/systemd-initctl.socket

● systemd-journald-dev-log.socket - Journal Socket (/dev/log)
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald-dev-log.socket; static)
     Active: active (running) since Mon 2026-02-23 06:36:00 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:00 UTC; 3h 35min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/dev-log (Datagram)
     CGroup: /system.slice/systemd-journald-dev-log.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-journald.socket - Journal Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.socket; static)
     Active: active (running) since Mon 2026-02-23 06:36:00 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:00 UTC; 3h 35min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/socket (Datagram)
             /run/systemd/journal/stdout (Stream)
     CGroup: /system.slice/systemd-journald.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-rfkill.socket - Load/Save RF Kill Switch Status /dev/rfkill Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.socket; static)
     Active: active (listening) since Mon 2026-02-23 06:36:05 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:05 UTC; 3h 34min ago
   Triggers: ● systemd-rfkill.service
       Docs: man:systemd-rfkill.socket(8)
     Listen: /dev/rfkill (Special)
     CGroup: /system.slice/systemd-rfkill.socket

Feb 23 06:36:05 np0005626466.novalocal systemd[1]: Listening on Load/Save RF Kill Switch Status /dev/rfkill Watch.

● systemd-udevd-control.socket - udev Control Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-control.socket; static)
     Active: active (running) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-control.socket(8)
             man:udev(7)
     Listen: /run/udev/control (SequentialPacket)
     CGroup: /system.slice/systemd-udevd-control.socket

● systemd-udevd-kernel.socket - udev Kernel Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-kernel.socket; static)
     Active: active (running) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-kernel.socket(8)
             man:udev(7)
     Listen: kobject-uevent 1 (Netlink)
     CGroup: /system.slice/systemd-udevd-kernel.socket

○ virtinterfaced-admin.socket - libvirt interface daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-admin.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-admin-sock (Stream)

○ virtinterfaced-ro.socket - libvirt interface daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-ro.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock-ro (Stream)

○ virtinterfaced.socket - libvirt interface daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock (Stream)

○ virtlockd-admin.socket - libvirt locking daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd-admin.socket; enabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-admin-sock (Stream)

● virtlockd.socket - libvirt locking daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.socket; enabled; preset: disabled)
     Active: active (listening) since Mon 2026-02-23 09:24:08 UTC; 46min ago
      Until: Mon 2026-02-23 09:24:08 UTC; 46min ago
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-sock (Stream)
     CGroup: /system.slice/virtlockd.socket

Feb 23 09:24:08 np0005626466.localdomain systemd[1]: Listening on libvirt locking daemon socket.

● virtlogd-admin.socket - libvirt logging daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd-admin.socket; enabled; preset: disabled)
    Drop-In: /etc/systemd/system/virtlogd-admin.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-23 09:24:04 UTC; 46min ago
      Until: Mon 2026-02-23 09:24:04 UTC; 46min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 100220)
     Memory: 0B
        CPU: 1ms
     CGroup: /system.slice/virtlogd-admin.socket

Feb 23 09:24:04 np0005626466.localdomain systemd[1]: Starting libvirt logging daemon admin socket...
Feb 23 09:24:04 np0005626466.localdomain systemd[1]: Listening on libvirt logging daemon admin socket.

● virtlogd.socket - libvirt logging daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-23 09:24:04 UTC; 46min ago
      Until: Mon 2026-02-23 09:24:04 UTC; 46min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 100220)
     Memory: 0B
        CPU: 1ms
     CGroup: /system.slice/virtlogd.socket

Feb 23 09:24:04 np0005626466.localdomain systemd[1]: Starting libvirt logging daemon socket...
Feb 23 09:24:04 np0005626466.localdomain systemd[1]: Listening on libvirt logging daemon socket.

○ virtnetworkd-admin.socket - libvirt network daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-admin.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-admin-sock (Stream)

○ virtnetworkd-ro.socket - libvirt network daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-ro.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock-ro (Stream)

○ virtnetworkd.socket - libvirt network daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock (Stream)

● virtnodedevd-admin.socket - libvirt nodedev daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-admin.socket; enabled; preset: disabled)
    Drop-In: /etc/systemd/system/virtnodedevd-admin.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-23 09:24:06 UTC; 46min ago
      Until: Mon 2026-02-23 09:24:06 UTC; 46min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 100220)
     Memory: 0B
        CPU: 2ms
     CGroup: /system.slice/virtnodedevd-admin.socket

Feb 23 09:24:06 np0005626466.localdomain systemd[1]: Starting libvirt nodedev daemon admin socket...
Feb 23 09:24:06 np0005626466.localdomain systemd[1]: Listening on libvirt nodedev daemon admin socket.

● virtnodedevd-ro.socket - libvirt nodedev daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-ro.socket; enabled; preset: disabled)
    Drop-In: /etc/systemd/system/virtnodedevd-ro.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-23 09:24:06 UTC; 46min ago
      Until: Mon 2026-02-23 09:24:06 UTC; 46min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 100220)
     Memory: 0B
        CPU: 1ms
     CGroup: /system.slice/virtnodedevd-ro.socket

Feb 23 09:24:06 np0005626466.localdomain systemd[1]: Starting libvirt nodedev daemon read-only socket...
Feb 23 09:24:06 np0005626466.localdomain systemd[1]: Listening on libvirt nodedev daemon read-only socket.

● virtnodedevd.socket - libvirt nodedev daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-23 09:24:06 UTC; 46min ago
      Until: Mon 2026-02-23 09:24:06 UTC; 46min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 100220)
     Memory: 0B
        CPU: 2ms
     CGroup: /system.slice/virtnodedevd.socket

Feb 23 09:24:06 np0005626466.localdomain systemd[1]: Starting libvirt nodedev daemon socket...
Feb 23 09:24:06 np0005626466.localdomain systemd[1]: Listening on libvirt nodedev daemon socket.

○ virtnwfilterd-admin.socket - libvirt nwfilter daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-admin.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-admin-sock (Stream)

○ virtnwfilterd-ro.socket - libvirt nwfilter daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-ro.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock-ro (Stream)

○ virtnwfilterd.socket - libvirt nwfilter daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock (Stream)

● virtproxyd-admin.socket - libvirt proxy daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-admin.socket; enabled; preset: disabled)
    Drop-In: /etc/systemd/system/virtproxyd-admin.socket.d
             └─override.conf
     Active: active (listening) since Mon 2026-02-23 09:24:07 UTC; 46min ago
      Until: Mon 2026-02-23 09:24:07 UTC; 46min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 100220)
     Memory: 0B
        CPU: 1ms
     CGroup: /system.slice/virtproxyd-admin.socket

Feb 23 09:24:07 np0005626466.localdomain systemd[1]: Starting libvirt proxy daemon admin socket...
Feb 23 09:24:07 np0005626466.localdomain systemd[1]: Listening on libvirt proxy daemon admin socket.

● virtproxyd-ro.socket - libvirt proxy daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-ro.socket; enabled; preset: disabled)
    Drop-In: /etc/systemd/system/virtproxyd-ro.socket.d
             └─override.conf
     Active: active (listening) since Mon 2026-02-23 09:24:07 UTC; 46min ago
      Until: Mon 2026-02-23 09:24:07 UTC; 46min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 100220)
     Memory: 0B
        CPU: 2ms
     CGroup: /system.slice/virtproxyd-ro.socket

Feb 23 09:24:07 np0005626466.localdomain systemd[1]: Starting libvirt proxy daemon read-only socket...
Feb 23 09:24:07 np0005626466.localdomain systemd[1]: Listening on libvirt proxy daemon read-only socket.

● virtproxyd.socket - libvirt proxy daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd.socket.d
             └─override.conf
     Active: active (listening) since Mon 2026-02-23 09:24:07 UTC; 46min ago
      Until: Mon 2026-02-23 09:24:07 UTC; 46min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 100220)
     Memory: 0B
        CPU: 2ms
     CGroup: /system.slice/virtproxyd.socket

Feb 23 09:24:07 np0005626466.localdomain systemd[1]: Starting libvirt proxy daemon socket...
Feb 23 09:24:07 np0005626466.localdomain systemd[1]: Listening on libvirt proxy daemon socket.

● virtqemud-admin.socket - libvirt QEMU daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-admin.socket; enabled; preset: disabled)
    Drop-In: /etc/systemd/system/virtqemud-admin.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-23 09:24:08 UTC; 46min ago
      Until: Mon 2026-02-23 09:24:08 UTC; 46min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 100220)
     Memory: 0B
        CPU: 2ms
     CGroup: /system.slice/virtqemud-admin.socket

Feb 23 09:24:08 np0005626466.localdomain systemd[1]: Starting libvirt QEMU daemon admin socket...
Feb 23 09:24:08 np0005626466.localdomain systemd[1]: Listening on libvirt QEMU daemon admin socket.

● virtqemud-ro.socket - libvirt QEMU daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-ro.socket; enabled; preset: disabled)
    Drop-In: /etc/systemd/system/virtqemud-ro.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-23 09:24:08 UTC; 46min ago
      Until: Mon 2026-02-23 09:24:08 UTC; 46min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 100220)
     Memory: 0B
        CPU: 1ms
     CGroup: /system.slice/virtqemud-ro.socket

Feb 23 09:24:08 np0005626466.localdomain systemd[1]: Starting libvirt QEMU daemon read-only socket...
Feb 23 09:24:08 np0005626466.localdomain systemd[1]: Listening on libvirt QEMU daemon read-only socket.

● virtqemud.socket - libvirt QEMU daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.socket; enabled; preset: disabled)
    Drop-In: /etc/systemd/system/virtqemud.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-23 09:24:08 UTC; 46min ago
      Until: Mon 2026-02-23 09:24:08 UTC; 46min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 100220)
     Memory: 0B
        CPU: 2ms
     CGroup: /system.slice/virtqemud.socket

Feb 23 09:24:08 np0005626466.localdomain systemd[1]: Starting libvirt QEMU daemon socket...
Feb 23 09:24:08 np0005626466.localdomain systemd[1]: Listening on libvirt QEMU daemon socket.

● virtsecretd-admin.socket - libvirt secret daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-admin.socket; enabled; preset: disabled)
    Drop-In: /etc/systemd/system/virtsecretd-admin.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-23 09:24:10 UTC; 46min ago
      Until: Mon 2026-02-23 09:24:10 UTC; 46min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 100220)
     Memory: 0B
        CPU: 1ms
     CGroup: /system.slice/virtsecretd-admin.socket

Feb 23 09:24:10 np0005626466.localdomain systemd[1]: Starting libvirt secret daemon admin socket...
Feb 23 09:24:10 np0005626466.localdomain systemd[1]: Listening on libvirt secret daemon admin socket.

● virtsecretd-ro.socket - libvirt secret daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-ro.socket; enabled; preset: disabled)
    Drop-In: /etc/systemd/system/virtsecretd-ro.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-23 09:24:10 UTC; 46min ago
      Until: Mon 2026-02-23 09:24:10 UTC; 46min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 100220)
     Memory: 0B
        CPU: 2ms
     CGroup: /system.slice/virtsecretd-ro.socket

Feb 23 09:24:10 np0005626466.localdomain systemd[1]: Starting libvirt secret daemon read-only socket...
Feb 23 09:24:10 np0005626466.localdomain systemd[1]: Listening on libvirt secret daemon read-only socket.

● virtsecretd.socket - libvirt secret daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd.socket.d
             └─override.conf
     Active: active (running) since Mon 2026-02-23 09:24:10 UTC; 46min ago
      Until: Mon 2026-02-23 09:24:10 UTC; 46min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 100220)
     Memory: 0B
        CPU: 2ms
     CGroup: /system.slice/virtsecretd.socket

Feb 23 09:24:10 np0005626466.localdomain systemd[1]: Starting libvirt secret daemon socket...
Feb 23 09:24:10 np0005626466.localdomain systemd[1]: Listening on libvirt secret daemon socket.

○ virtstoraged-admin.socket - libvirt storage daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-admin.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-admin-sock (Stream)

○ virtstoraged-ro.socket - libvirt storage daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-ro.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock-ro (Stream)

○ virtstoraged.socket - libvirt storage daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock (Stream)

● swap.swap - /swap
     Loaded: loaded (/etc/fstab; generated)
     Active: active since Mon 2026-02-23 09:10:56 UTC; 1h 0min ago
      Until: Mon 2026-02-23 09:10:56 UTC; 1h 0min ago
       What: /swap
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● basic.target - Basic System
     Loaded: loaded (/usr/lib/systemd/system/basic.target; static)
     Active: active since Mon 2026-02-23 06:36:04 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:04 UTC; 3h 35min ago
       Docs: man:systemd.special(7)

Feb 23 06:36:04 localhost systemd[1]: Reached target Basic System.

○ blockdev@dev-disk-by\x2duuid-7B77\x2d95E7.target - Block Device Preparation for /dev/disk/by-uuid/7B77-95E7
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ blockdev@dev-disk-by\x2duuid-a3dd82de\x2dffc6\x2d4652\x2d88b9\x2d80e003b8f20a.target - Block Device Preparation for /dev/disk/by-uuid/a3dd82de-ffc6-4652-88b9-80e003b8f20a
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
Unit firewalld.target could not be found.
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ blockdev@dev-disk-by\x2duuid-b141154b\x2d6a70\x2d437a\x2da97f\x2dd160c9ba37eb.target - Block Device Preparation for /dev/disk/by-uuid/b141154b-6a70-437a-a97f-d160c9ba37eb
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ blockdev@dev-vda2.target - Block Device Preparation for /dev/vda2
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ blockdev@dev-vda3.target - Block Device Preparation for /dev/vda3
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ blockdev@dev-vda4.target - Block Device Preparation for /dev/vda4
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46.target - Ceph cluster f1fea371-cb69-578d-a3d0-b5c472a84b46
     Loaded: loaded (/etc/systemd/system/ceph-f1fea371-cb69-578d-a3d0-b5c472a84b46.target; enabled; preset: disabled)
     Active: active since Mon 2026-02-23 07:38:40 UTC; 2h 32min ago
      Until: Mon 2026-02-23 07:38:40 UTC; 2h 32min ago

Feb 23 07:38:40 np0005626466.localdomain systemd[1]: Reached target Ceph cluster f1fea371-cb69-578d-a3d0-b5c472a84b46.

● ceph.target - All Ceph clusters and services
     Loaded: loaded (/etc/systemd/system/ceph.target; enabled; preset: disabled)
     Active: active since Mon 2026-02-23 07:38:40 UTC; 2h 32min ago
      Until: Mon 2026-02-23 07:38:40 UTC; 2h 32min ago

Feb 23 07:38:40 np0005626466.localdomain systemd[1]: Reached target All Ceph clusters and services.

● cloud-config.target - Cloud-config availability
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.target; static)
     Active: active since Mon 2026-02-23 06:36:09 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:09 UTC; 3h 34min ago

Feb 23 06:36:09 np0005626466.novalocal systemd[1]: Reached target Cloud-config availability.

● cloud-init.target - Cloud-init target
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.target; enabled-runtime; preset: disabled)
     Active: active since Mon 2026-02-23 06:36:10 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:10 UTC; 3h 34min ago

Feb 23 06:36:10 np0005626466.novalocal systemd[1]: Reached target Cloud-init target.

○ cryptsetup-pre.target - Local Encrypted Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● cryptsetup.target - Local Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup.target; static)
     Active: active since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
       Docs: man:systemd.special(7)

● edpm_libvirt.target
     Loaded: loaded (/etc/systemd/system/edpm_libvirt.target; static)
     Active: active since Mon 2026-02-23 09:24:44 UTC; 46min ago
      Until: Mon 2026-02-23 09:24:44 UTC; 46min ago

Feb 23 09:24:44 np0005626466.localdomain systemd[1]: Reached target edpm_libvirt.target.

○ emergency.target - Emergency Mode
     Loaded: loaded (/usr/lib/systemd/system/emergency.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ first-boot-complete.target - First Boot Complete
     Loaded: loaded (/usr/lib/systemd/system/first-boot-complete.target; static)
     Active: inactive (dead)
  Condition: start condition failed at Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
       Docs: man:systemd.special(7)

Feb 23 06:36:02 localhost systemd[1]: First Boot Complete was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ getty-pre.target - Preparation for Logins
     Loaded: loaded (/usr/lib/systemd/system/getty-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

● getty.target - Login Prompts
     Loaded: loaded (/usr/lib/systemd/system/getty.target; static)
     Active: active since Mon 2026-02-23 06:36:09 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:09 UTC; 3h 34min ago
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

Feb 23 06:36:09 np0005626466.novalocal systemd[1]: Reached target Login Prompts.

○ graphical.target - Graphical Interface
     Loaded: loaded (/usr/lib/systemd/system/graphical.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ initrd-fs.target - Initrd File Systems
     Loaded: loaded (/usr/lib/systemd/system/initrd-fs.target; static)
     Active: inactive (dead) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
       Docs: man:systemd.special(7)

Feb 23 06:36:01 localhost systemd[1]: Reached target Initrd File Systems.

○ initrd-root-device.target - Initrd Root Device
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-device.target; static)
     Active: inactive (dead) since Mon 2026-02-23 06:36:01 UTC; 3h 35min ago
       Docs: man:systemd.special(7)

Feb 23 06:36:00 localhost systemd[1]: Reached target Initrd Root Device.
Feb 23 06:36:01 localhost systemd[1]: Stopped target Initrd Root Device.

○ initrd-root-fs.target - Initrd Root File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-fs.target; static)
     Active: inactive (dead) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
       Docs: man:systemd.special(7)

Feb 23 06:36:01 localhost systemd[1]: Reached target Initrd Root File System.

○ initrd-switch-root.target - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.target; static)
     Active: inactive (dead) since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago

Feb 23 06:36:01 localhost systemd[1]: Reached target Switch Root.

○ initrd-usr-fs.target - Initrd /usr File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-usr-fs.target; static)
     Active: inactive (dead) since Mon 2026-02-23 06:36:01 UTC; 3h 35min ago
       Docs: man:systemd.special(7)

Feb 23 06:36:01 localhost systemd[1]: Stopped target Initrd /usr File System.
Notice: journal has been rotated since unit was started, output may be incomplete.

○ initrd.target - Initrd Default Target
     Loaded: loaded (/usr/lib/systemd/system/initrd.target; static)
     Active: inactive (dead) since Mon 2026-02-23 06:36:01 UTC; 3h 35min ago
       Docs: man:systemd.special(7)

Feb 23 06:36:01 localhost systemd[1]: Reached target Initrd Default Target.
Feb 23 06:36:01 localhost systemd[1]: Stopped target Initrd Default Target.

● integritysetup.target - Local Integrity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/integritysetup.target; static)
     Active: active since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
       Docs: man:systemd.special(7)

● local-fs-pre.target - Preparation for Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs-pre.target; static)
     Active: active since Mon 2026-02-23 06:36:03 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:03 UTC; 3h 35min ago
       Docs: man:systemd.special(7)

Feb 23 06:36:03 localhost systemd[1]: Reached target Preparation for Local File Systems.

● local-fs.target - Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs.target; static)
     Active: active since Mon 2026-02-23 06:36:04 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:04 UTC; 3h 35min ago
       Docs: man:systemd.special(7)

Feb 23 06:36:04 localhost systemd[1]: Reached target Local File Systems.

● multi-user.target - Multi-User System
     Loaded: loaded (/usr/lib/systemd/system/multi-user.target; indirect; preset: disabled)
     Active: active since Mon 2026-02-23 06:36:09 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:09 UTC; 3h 34min ago
       Docs: man:systemd.special(7)

Feb 23 06:36:09 np0005626466.novalocal systemd[1]: Reached target Multi-User System.

● network-online.target - Network is Online
     Loaded: loaded (/usr/lib/systemd/system/network-online.target; static)
     Active: active since Mon 2026-02-23 06:36:09 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:09 UTC; 3h 34min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Feb 23 06:36:09 np0005626466.novalocal systemd[1]: Reached target Network is Online.

● network-pre.target - Preparation for Network
     Loaded: loaded (/usr/lib/systemd/system/network-pre.target; static)
     Active: active since Mon 2026-02-23 06:36:05 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:05 UTC; 3h 34min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Feb 23 06:36:05 np0005626466.novalocal systemd[1]: Reached target Preparation for Network.

● network.target - Network
     Loaded: loaded (/usr/lib/systemd/system/network.target; static)
     Active: active since Mon 2026-02-23 06:36:05 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:05 UTC; 3h 34min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Feb 23 06:36:05 np0005626466.novalocal systemd[1]: Reached target Network.

● nfs-client.target - NFS client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-client.target; enabled; preset: disabled)
     Active: active since Mon 2026-02-23 06:36:05 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:05 UTC; 3h 34min ago

Feb 23 06:36:05 np0005626466.novalocal systemd[1]: Reached target NFS client services.

○ nss-lookup.target - Host and Network Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-lookup.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● nss-user-lookup.target - User and Group Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-user-lookup.target; static)
     Active: active since Mon 2026-02-23 06:36:04 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:04 UTC; 3h 35min ago
       Docs: man:systemd.special(7)

Feb 23 06:36:04 localhost systemd[1]: Reached target User and Group Name Lookups.

● paths.target - Path Units
     Loaded: loaded (/usr/lib/systemd/system/paths.target; static)
     Active: active since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
       Docs: man:systemd.special(7)

○ remote-cryptsetup.target - Remote Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-cryptsetup.target; disabled; preset: enabled)
     Active: inactive (dead) since Mon 2026-02-23 06:36:01 UTC; 3h 35min ago
       Docs: man:systemd.special(7)

Feb 23 06:36:01 localhost systemd[1]: Reached target Remote Encrypted Volumes.
Feb 23 06:36:01 localhost systemd[1]: Stopped target Remote Encrypted Volumes.

● remote-fs-pre.target - Preparation for Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs-pre.target; static)
     Active: active since Mon 2026-02-23 06:36:05 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:05 UTC; 3h 34min ago
       Docs: man:systemd.special(7)

Feb 23 06:36:05 np0005626466.novalocal systemd[1]: Reached target Preparation for Remote File Systems.

● remote-fs.target - Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs.target; enabled; preset: enabled)
     Active: active since Mon 2026-02-23 06:36:05 UTC; 3h 34min ago
      Until: Mon 2026-02-23 06:36:05 UTC; 3h 34min ago
       Docs: man:systemd.special(7)

Feb 23 06:36:05 np0005626466.novalocal systemd[1]: Reached target Remote File Systems.

○ remote-veritysetup.target - Remote Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-veritysetup.target; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ rescue.target - Rescue Mode
     Loaded: loaded (/usr/lib/systemd/system/rescue.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

Unit syslog.target could not be found.
● rpc_pipefs.target
     Loaded: loaded (/usr/lib/systemd/system/rpc_pipefs.target; static)
     Active: active since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago

● rpcbind.target - RPC Port Mapper
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.target; static)
     Active: active since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
       Docs: man:systemd.special(7)

○ shutdown.target - System Shutdown
     Loaded: loaded (/usr/lib/systemd/system/shutdown.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● slices.target - Slice Units
     Loaded: loaded (/usr/lib/systemd/system/slices.target; static)
     Active: active since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
       Docs: man:systemd.special(7)

● sockets.target - Socket Units
     Loaded: loaded (/usr/lib/systemd/system/sockets.target; static)
     Active: active since Mon 2026-02-23 06:36:04 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:04 UTC; 3h 35min ago
       Docs: man:systemd.special(7)

Feb 23 06:36:04 localhost systemd[1]: Reached target Socket Units.

● sshd-keygen.target
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen.target; static)
     Active: active since Mon 2026-02-23 09:22:32 UTC; 48min ago
      Until: Mon 2026-02-23 09:22:32 UTC; 48min ago

Feb 23 09:22:32 np0005626466.localdomain systemd[1]: Reached target sshd-keygen.target.

● swap.target - Swaps
     Loaded: loaded (/usr/lib/systemd/system/swap.target; static)
     Active: active since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
       Docs: man:systemd.special(7)

● sysinit.target - System Initialization
     Loaded: loaded (/usr/lib/systemd/system/sysinit.target; static)
     Active: active since Mon 2026-02-23 06:36:04 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:04 UTC; 3h 35min ago
       Docs: man:systemd.special(7)

Feb 23 06:36:04 localhost systemd[1]: Reached target System Initialization.

● time-set.target - System Time Set
     Loaded: loaded (/usr/lib/systemd/system/time-set.target; static)
     Active: active since Mon 2026-02-23 07:38:40 UTC; 2h 32min ago
      Until: Mon 2026-02-23 07:38:40 UTC; 2h 32min ago
       Docs: man:systemd.special(7)

Feb 23 07:38:40 np0005626466.localdomain systemd[1]: Reached target System Time Set.

● time-sync.target - System Time Synchronized
     Loaded: loaded (/usr/lib/systemd/system/time-sync.target; static)
     Active: active since Mon 2026-02-23 07:38:40 UTC; 2h 32min ago
      Until: Mon 2026-02-23 07:38:40 UTC; 2h 32min ago
       Docs: man:systemd.special(7)

Feb 23 07:38:40 np0005626466.localdomain systemd[1]: Reached target System Time Synchronized.

● timers.target - Timer Units
     Loaded: loaded (/usr/lib/systemd/system/timers.target; static)
     Active: active since Mon 2026-02-23 06:36:04 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:04 UTC; 3h 35min ago
       Docs: man:systemd.special(7)

Feb 23 06:36:04 localhost systemd[1]: Reached target Timer Units.

○ umount.target - Unmount All Filesystems
     Loaded: loaded (/usr/lib/systemd/system/umount.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ veritysetup-pre.target - Local Verity Protected Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/veritysetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● veritysetup.target - Local Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/veritysetup.target; static)
     Active: active since Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:02 UTC; 3h 35min ago
       Docs: man:systemd.special(7)

○ virt-guest-shutdown.target - Libvirt guests shutdown
     Loaded: loaded (/etc/systemd/system/virt-guest-shutdown.target; static)
     Active: inactive (dead)
       Docs: https://libvirt.org

● 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4.timer - /usr/bin/podman healthcheck run 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4
     Loaded: loaded (/run/systemd/transient/122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4.timer; transient)
  Transient: yes
     Active: active (waiting) since Mon 2026-02-23 09:18:38 UTC; 52min ago
      Until: Mon 2026-02-23 09:18:38 UTC; 52min ago
    Trigger: Mon 2026-02-23 10:11:26 UTC; 21s left
   Triggers: ● 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4.service

Feb 23 09:18:38 np0005626466.localdomain systemd[1]: Started /usr/bin/podman healthcheck run 122635f0e8d39054eca80d2e540d00d16b4a0e4979d56e5a2e398526a74c57c4.

● 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e.timer - /usr/bin/podman healthcheck run 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e
     Loaded: loaded (/run/systemd/transient/6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e.timer; transient)
  Transient: yes
     Active: active (waiting) since Mon 2026-02-23 09:30:19 UTC; 40min ago
      Until: Mon 2026-02-23 09:30:19 UTC; 40min ago
    Trigger: Mon 2026-02-23 10:11:25 UTC; 20s left
   Triggers: ● 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e.service

Feb 23 09:30:19 np0005626466.localdomain systemd[1]: Started /usr/bin/podman healthcheck run 6a5bdcbb336925d11e98e079f4b8b7828b30e368d9b142bcbf78246faf17115e.

● 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e.timer - /usr/bin/podman healthcheck run 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e
     Loaded: loaded (/run/systemd/transient/8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e.timer; transient)
  Transient: yes
     Active: active (waiting) since Mon 2026-02-23 09:19:48 UTC; 51min ago
      Until: Mon 2026-02-23 09:19:48 UTC; 51min ago
    Trigger: Mon 2026-02-23 10:11:32 UTC; 27s left
   Triggers: ● 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e.service

Feb 23 09:19:48 np0005626466.localdomain systemd[1]: Started /usr/bin/podman healthcheck run 8ca16bfb2f59ef8fc3fb8572c3270f7856db7f62fc874baed2fd5084beb7f01e.

● c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8.timer - /usr/bin/podman healthcheck run c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8
     Loaded: loaded (/run/systemd/transient/c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8.timer; transient)
  Transient: yes
     Active: active (waiting) since Mon 2026-02-23 09:29:34 UTC; 41min ago
      Until: Mon 2026-02-23 09:29:34 UTC; 41min ago
    Trigger: Mon 2026-02-23 10:11:14 UTC; 9s left
   Triggers: ● c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8.service

Feb 23 09:29:34 np0005626466.localdomain systemd[1]: Started /usr/bin/podman healthcheck run c0dd95da0b8090e1dfefd899739a6f9a1e0b730eec9db8d0d0094fa6bfc519c8.

● cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52.timer - /usr/bin/podman healthcheck run cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52
     Loaded: loaded (/run/systemd/transient/cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52.timer; transient)
  Transient: yes
     Active: active (waiting) since Mon 2026-02-23 09:29:13 UTC; 41min ago
      Until: Mon 2026-02-23 09:29:13 UTC; 41min ago
    Trigger: Mon 2026-02-23 10:11:14 UTC; 9s left
   Triggers: ● cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52.service

Feb 23 09:29:13 np0005626466.localdomain systemd[1]: Started /usr/bin/podman healthcheck run cd17978adeb2b0468483202f98a4facd0357edbe019deb13313adab6b6b71f52.

● dnf-makecache.timer - dnf makecache --timer
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.timer; enabled; preset: enabled)
     Active: active (waiting) since Mon 2026-02-23 06:36:04 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:04 UTC; 3h 35min ago
    Trigger: Mon 2026-02-23 10:23:38 UTC; 12min left
   Triggers: ● dnf-makecache.service

Feb 23 06:36:04 localhost systemd[1]: Started dnf makecache --timer.

● fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9.timer - /usr/bin/podman healthcheck run fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9
     Loaded: loaded (/run/systemd/transient/fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9.timer; transient)
  Transient: yes
     Active: active (waiting) since Mon 2026-02-23 09:28:53 UTC; 42min ago
      Until: Mon 2026-02-23 09:28:53 UTC; 42min ago
    Trigger: Mon 2026-02-23 10:11:26 UTC; 21s left
   Triggers: ● fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9.service

Feb 23 09:28:53 np0005626466.localdomain systemd[1]: Started /usr/bin/podman healthcheck run fe7013c5595cf36029c5acd08a92109de37036b8a083179769dbc4e27b30aaf9.

● logrotate.timer - Daily rotation of log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.timer; enabled; preset: enabled)
     Active: active (waiting) since Mon 2026-02-23 06:36:04 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:04 UTC; 3h 35min ago
    Trigger: Tue 2026-02-24 00:00:00 UTC; 13h left
   Triggers: ● logrotate.service
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

Feb 23 06:36:04 localhost systemd[1]: Started Daily rotation of log files.

○ sysstat-collect.timer - Run system activity accounting tool every 10 minutes
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-collect.service

○ sysstat-summary.timer - Generate summary of yesterday's process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-summary.service

● systemd-tmpfiles-clean.timer - Daily Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.timer; static)
     Active: active (waiting) since Mon 2026-02-23 06:36:04 UTC; 3h 35min ago
      Until: Mon 2026-02-23 06:36:04 UTC; 3h 35min ago
    Trigger: Tue 2026-02-24 06:51:10 UTC; 20h left
   Triggers: ● systemd-tmpfiles-clean.service
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)

Feb 23 06:36:04 localhost systemd[1]: Started Daily Cleanup of Temporary Directories.

○ tripleo_nova_virtqemud_recover.timer - Check and recover tripleo_nova_virtqemud every 10m
     Loaded: loaded (/etc/systemd/system/tripleo_nova_virtqemud_recover.timer; enabled; preset: disabled)
     Active: inactive (dead) since Mon 2026-02-23 09:03:25 UTC; 1h 7min ago
   Duration: 54min 27.989s
    Trigger: n/a
   Triggers: ● tripleo_nova_virtqemud_recover.service

Feb 23 08:08:57 np0005626466.localdomain systemd[1]: Started Check and recover tripleo_nova_virtqemud every 10m.
Feb 23 09:03:25 np0005626466.localdomain systemd[1]: tripleo_nova_virtqemud_recover.timer: Deactivated successfully.
Feb 23 09:03:25 np0005626466.localdomain systemd[1]: Stopped Check and recover tripleo_nova_virtqemud every 10m.

● unbound-anchor.timer - daily update of the root trust anchor for DNSSEC
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.timer; enabled; preset: enabled)
     Active: active (waiting) since Mon 2026-02-23 07:13:25 UTC; 2h 57min ago
      Until: Mon 2026-02-23 07:13:25 UTC; 2h 57min ago
    Trigger: Tue 2026-02-24 00:00:00 UTC; 13h left
   Triggers: ● unbound-anchor.service
       Docs: man:unbound-anchor(8)

Feb 23 07:13:25 np0005626466.novalocal systemd[1]: Started daily update of the root trust anchor for DNSSEC.
