● compute-2
    State: running
    Units: 454 loaded (incl. loaded aliases)
     Jobs: 0 queued
   Failed: 0 units
    Since: Thu 2026-01-22 12:49:13 UTC; 2h 56min ago
  systemd: 252-64.el9
   CGroup: /
           ├─297599 turbostat --debug sleep 10
           ├─297604 sleep 10
           ├─init.scope
           │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
           ├─machine.slice
           │ ├─libpod-572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649.scope
           │ │ └─container
           │ │   ├─226435 dumb-init --single-child -- kolla_start
           │ │   ├─226437 /usr/bin/python3 /usr/bin/nova-compute
           │ │   ├─230058 rbd import --pool vms /var/lib/nova/instances/_base/389efd6047b99779d5161939afa4f2bdb261bfd0 e0e74330-96df-479f-8baf-53fbd2ccba91_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf
           │ │   ├─237484 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmpwbd1s1u6/privsep.sock
           │ │   ├─238396 rbd import --pool vms /var/lib/nova/instances/_base/389efd6047b99779d5161939afa4f2bdb261bfd0 f591d61b-712e-49aa-85bd-8d222b607eb3_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf
           │ │   ├─238793 rbd import --pool vms /var/lib/nova/instances/_base/389efd6047b99779d5161939afa4f2bdb261bfd0 87e798e6-6f00-4fe1-8412-75ddc9e2878e_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf
           │ │   ├─244616 rbd import --pool vms /var/lib/nova/instances/_base/389efd6047b99779d5161939afa4f2bdb261bfd0 8331b067-1b3f-4a1d-a596-e966f6de776a_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf
           │ │   ├─245897 rbd import --pool vms /var/lib/nova/instances/a0b3924b-4422-47c5-ba40-748e41b14d00/disk.config a0b3924b-4422-47c5-ba40-748e41b14d00_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf
           │ │   ├─248518 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmpk2q2e022/privsep.sock
           │ │   └─250095 rbd import --pool vms /var/lib/nova/instances/_base/389efd6047b99779d5161939afa4f2bdb261bfd0 001ba9a6-ba0c-438d-8150-5cfbcec3d34f_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf
           │ ├─libpod-65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d.scope
           │ │ └─container
           │ │   ├─143494 dumb-init --single-child -- kolla_start
           │ │   ├─143497 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │ │   ├─143757 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
           │ │   ├─143856 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp405dvk24/privsep.sock
           │ │   ├─237689 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmp_pg3kwj0/privsep.sock
           │ │   └─237788 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmp3y50ov6x/privsep.sock
           │ ├─libpod-8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356.scope
           │ │ └─container
           │ │   ├─133158 dumb-init --single-child -- kolla_start
           │ │   └─133161 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
           │ └─machine-qemu\x2d6\x2dinstance\x2d00000016.scope
           │   └─libvirt
           │     └─252492 /usr/libexec/qemu-kvm -name guest=instance-00000016,debug-threads=on -S -object "{\"qom-type\":\"secret\",\"id\":\"masterKey0\",\"format\":\"raw\",\"file\":\"/var/lib/libvirt/qemu/domain-6-instance-00000016/master-key.aes\"}" -machine pc-q35-rhel9.8.0,usb=off,dump-guest-core=off,memory-backend=pc.ram,hpet=off,acpi=on -accel kvm -cpu Nehalem -m size=131072k -object "{\"qom-type\":\"memory-backend-ram\",\"id\":\"pc.ram\",\"size\":134217728}" -overcommit mem-lock=off -smp 1,sockets=1,dies=1,clusters=1,cores=1,threads=1 -uuid 839e8e64-64a9-4e35-85dd-cdbb7f8e71c5 -smbios "type=1,manufacturer=RDO,product=OpenStack Compute,version=27.5.2-0.20250829104910.6f8decf.el9,serial=839e8e64-64a9-4e35-85dd-cdbb7f8e71c5,uuid=839e8e64-64a9-4e35-85dd-cdbb7f8e71c5,family=Virtual Machine" -no-user-config -nodefaults -chardev socket,id=charmonitor,fd=26,server=on,wait=off -mon chardev=charmonitor,id=monitor,mode=control -rtc base=utc,driftfix=slew -global kvm-pit.lost_tick_policy=delay -no-shutdown -boot strict=on -device "{\"driver\":\"pcie-root-port\",\"port\":16,\"chassis\":1,\"id\":\"pci.1\",\"bus\":\"pcie.0\",\"multifunction\":true,\"addr\":\"0x2\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":17,\"chassis\":2,\"id\":\"pci.2\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x1\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":18,\"chassis\":3,\"id\":\"pci.3\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x2\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":19,\"chassis\":4,\"id\":\"pci.4\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x3\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":20,\"chassis\":5,\"id\":\"pci.5\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x4\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":21,\"chassis\":6,\"id\":\"pci.6\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x5\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":22,\"chassis\":7,\"id\":\"pci.7\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x6\"}" -device 
"{\"driver\":\"pcie-root-port\",\"port\":23,\"chassis\":8,\"id\":\"pci.8\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x7\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":24,\"chassis\":9,\"id\":\"pci.9\",\"bus\":\"pcie.0\",\"multifunction\":true,\"addr\":\"0x3\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":25,\"chassis\":10,\"id\":\"pci.10\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x1\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":26,\"chassis\":11,\"id\":\"pci.11\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x2\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":27,\"chassis\":12,\"id\":\"pci.12\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x3\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":28,\"chassis\":13,\"id\":\"pci.13\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x4\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":29,\"chassis\":14,\"id\":\"pci.14\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x5\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":30,\"chassis\":15,\"id\":\"pci.15\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x6\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":31,\"chassis\":16,\"id\":\"pci.16\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x7\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":32,\"chassis\":17,\"id\":\"pci.17\",\"bus\":\"pcie.0\",\"multifunction\":true,\"addr\":\"0x4\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":33,\"chassis\":18,\"id\":\"pci.18\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x1\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":34,\"chassis\":19,\"id\":\"pci.19\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x2\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":35,\"chassis\":20,\"id\":\"pci.20\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x3\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":36,\"chassis\":21,\"id\":\"pci.21\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x4\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":37,\"chassis\":22,\"id\":\"pci.22\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x5\"}" -device 
"{\"driver\":\"pcie-root-port\",\"port\":38,\"chassis\":23,\"id\":\"pci.23\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x6\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":39,\"chassis\":24,\"id\":\"pci.24\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x7\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":40,\"chassis\":25,\"id\":\"pci.25\",\"bus\":\"pcie.0\",\"addr\":\"0x5\"}" -device "{\"driver\":\"pcie-pci-bridge\",\"id\":\"pci.26\",\"bus\":\"pci.1\",\"addr\":\"0x0\"}" -device "{\"driver\":\"piix3-usb-uhci\",\"id\":\"usb\",\"bus\":\"pci.26\",\"addr\":\"0x1\"}" -device "{\"driver\":\"virtio-scsi-pci\",\"id\":\"scsi0\",\"bus\":\"pci.3\",\"addr\":\"0x0\"}" -object "{\"qom-type\":\"secret\",\"id\":\"libvirt-2-storage-auth-secret0\",\"data\":\"ckHvapQG84cP5zoSqf7m1gCY4qTASyRQDTUqj+xCcWI=\",\"keyid\":\"masterKey0\",\"iv\":\"qG5IcOgwSBm0Z7uTjw3pxQ==\",\"format\":\"base64\"}" -blockdev "{\"driver\":\"rbd\",\"pool\":\"vms\",\"image\":\"839e8e64-64a9-4e35-85dd-cdbb7f8e71c5_disk\",\"server\":[{\"host\":\"192.168.122.100\",\"port\":\"6789\"},{\"host\":\"192.168.122.102\",\"port\":\"6789\"},{\"host\":\"192.168.122.101\",\"port\":\"6789\"}],\"user\":\"openstack\",\"auth-client-required\":[\"cephx\",\"none\"],\"key-secret\":\"libvirt-2-storage-auth-secret0\",\"node-name\":\"libvirt-2-storage\",\"read-only\":false,\"cache\":{\"direct\":true,\"no-flush\":false}}" -device "{\"driver\":\"scsi-hd\",\"bus\":\"scsi0.0\",\"channel\":0,\"scsi-id\":0,\"lun\":0,\"device_id\":\"drive-scsi0-0-0-0\",\"drive\":\"libvirt-2-storage\",\"id\":\"scsi0-0-0-0\",\"bootindex\":1,\"write-cache\":\"on\"}" -object "{\"qom-type\":\"secret\",\"id\":\"libvirt-1-storage-auth-secret0\",\"data\":\"us/rqTgpkEOPr1e80IYYxNi8jF+jipoTDjYt15m4hho=\",\"keyid\":\"masterKey0\",\"iv\":\"aOqQCrTLnlRcP7tTW3+8PQ==\",\"format\":\"base64\"}" -blockdev 
"{\"driver\":\"rbd\",\"pool\":\"vms\",\"image\":\"839e8e64-64a9-4e35-85dd-cdbb7f8e71c5_disk.config\",\"server\":[{\"host\":\"192.168.122.100\",\"port\":\"6789\"},{\"host\":\"192.168.122.102\",\"port\":\"6789\"},{\"host\":\"192.168.122.101\",\"port\":\"6789\"}],\"user\":\"openstack\",\"auth-client-required\":[\"cephx\",\"none\"],\"key-secret\":\"libvirt-1-storage-auth-secret0\",\"node-name\":\"libvirt-1-storage\",\"read-only\":true,\"cache\":{\"direct\":true,\"no-flush\":false}}" -device "{\"driver\":\"scsi-cd\",\"bus\":\"scsi0.0\",\"channel\":0,\"scsi-id\":0,\"lun\":1,\"device_id\":\"drive-scsi0-0-0-1\",\"drive\":\"libvirt-1-storage\",\"id\":\"scsi0-0-0-1\",\"write-cache\":\"on\"}" -netdev "{\"type\":\"tap\",\"fd\":\"28\",\"vhost\":true,\"vhostfd\":\"30\",\"id\":\"hostnet0\"}" -device "{\"driver\":\"virtio-net-pci\",\"rx_queue_size\":512,\"host_mtu\":1442,\"netdev\":\"hostnet0\",\"id\":\"net0\",\"mac\":\"fa:16:3e:35:f2:b5\",\"bus\":\"pci.2\",\"addr\":\"0x0\"}" -add-fd set=0,fd=27,opaque=serial0-log -chardev pty,id=charserial0,logfile=/dev/fdset/0,logappend=on -device "{\"driver\":\"isa-serial\",\"chardev\":\"charserial0\",\"id\":\"serial0\",\"index\":0}" -device "{\"driver\":\"usb-tablet\",\"id\":\"input0\",\"bus\":\"usb.0\",\"port\":\"1\"}" -audiodev "{\"id\":\"audio1\",\"driver\":\"none\"}" -object "{\"qom-type\":\"tls-creds-x509\",\"id\":\"vnc-tls-creds0\",\"dir\":\"/etc/pki/qemu\",\"endpoint\":\"server\",\"verify-peer\":true}" -vnc "[::0]:0,tls-creds=vnc-tls-creds0,audiodev=audio1" -device "{\"driver\":\"virtio-vga\",\"id\":\"video0\",\"max_outputs\":1,\"bus\":\"pcie.0\",\"addr\":\"0x1\"}" -global ICH9-LPC.noreboot=off -watchdog-action reset -device "{\"driver\":\"virtio-balloon-pci\",\"id\":\"balloon0\",\"bus\":\"pci.4\",\"addr\":\"0x0\"}" -object "{\"qom-type\":\"rng-random\",\"id\":\"objrng0\",\"filename\":\"/dev/urandom\"}" -device "{\"driver\":\"virtio-rng-pci\",\"rng\":\"objrng0\",\"id\":\"rng0\",\"bus\":\"pci.5\",\"addr\":\"0x0\"}" -device 
"{\"driver\":\"vmcoreinfo\"}" -sandbox on,obsolete=deny,elevateprivileges=deny,spawn=deny,resourcecontrol=deny -msg timestamp=on
           ├─system.slice
           │ ├─NetworkManager.service
           │ │ └─49000 /usr/sbin/NetworkManager --no-daemon
           │ ├─auditd.service
           │ │ ├─699 /sbin/auditd
           │ │ └─701 /usr/sbin/sedispatch
           │ ├─chronyd.service
           │ │ └─58561 /usr/sbin/chronyd -F 2
           │ ├─crond.service
           │ │ └─1006 /usr/sbin/crond -n
           │ ├─dbus-broker.service
           │ │ ├─760 /usr/bin/dbus-broker-launch --scope system --audit
           │ │ └─772 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
           │ ├─edpm_nova_compute.service
           │ │ └─226433 /usr/bin/conmon --api-version 1 -c 572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649 -u 572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649/userdata -p /run/containers/storage/overlay-containers/572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649
           │ ├─edpm_ovn_controller.service
           │ │ └─133156 /usr/bin/conmon --api-version 1 -c 8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356 -u 8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356/userdata -p /run/containers/storage/overlay-containers/8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356
           │ ├─edpm_ovn_metadata_agent.service
           │ │ └─143492 /usr/bin/conmon --api-version 1 -c 65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d -u 65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d/userdata -p /run/containers/storage/overlay-containers/65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d
           │ ├─gssproxy.service
           │ │ └─868 /usr/sbin/gssproxy -D
           │ ├─irqbalance.service
           │ │ └─785 /usr/sbin/irqbalance
           │ ├─iscsid.service
           │ │ └─211449 /usr/sbin/iscsid -f
           │ ├─multipathd.service
           │ │ └─211608 /sbin/multipathd -d -s
           │ ├─ovs-vswitchd.service
           │ │ └─47295 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
           │ ├─ovsdb-server.service
           │ │ └─47215 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
           │ ├─polkit.service
           │ │ └─43481 /usr/lib/polkit-1/polkitd --no-debug
           │ ├─rpcbind.service
           │ │ └─697 /usr/bin/rpcbind -w -f
           │ ├─rsyslog.service
           │ │ └─1002 /usr/sbin/rsyslogd -n
           │ ├─sshd.service
           │ │ └─169467 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
           │ ├─system-ceph\x2d088fe176\x2d0106\x2d5401\x2d803c\x2d2da38b73b76a.slice
           │ │ ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@crash.compute-2.service
           │ │ │ ├─libpod-payload-52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93
           │ │ │ │ ├─77794 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-2
           │ │ │ │ └─77796 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-2
           │ │ │ └─runtime
           │ │ │   └─77792 /usr/bin/conmon --api-version 1 -c 52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93 -u 52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93/userdata -p /run/containers/storage/overlay-containers/52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-crash-compute-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@crash.compute-2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93
           │ │ ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@haproxy.rgw.default.compute-2.zogxki.service
           │ │ │ ├─libpod-payload-ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f
           │ │ │ │ ├─82540 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
           │ │ │ │ ├─82542 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
           │ │ │ │ └─82544 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
           │ │ │ └─runtime
           │ │ │   └─82538 /usr/bin/conmon --api-version 1 -c ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f -u ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f/userdata -p /run/containers/storage/overlay-containers/ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-haproxy-rgw-default-compute-2-zogxki --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@haproxy.rgw.default.compute-2.zogxki.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f
           │ │ ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@keepalived.rgw.default.compute-2.xbsrtt.service
           │ │ │ ├─libpod-payload-6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4
           │ │ │ │ ├─82994 /run/podman-init -- ./init.sh
           │ │ │ │ ├─82996 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
           │ │ │ │ └─82998 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
           │ │ │ └─runtime
           │ │ │   └─82992 /usr/bin/conmon --api-version 1 -c 6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4 -u 6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4/userdata -p /run/containers/storage/overlay-containers/6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-keepalived-rgw-default-compute-2-xbsrtt --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@keepalived.rgw.default.compute-2.xbsrtt.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4
           │ │ ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@mds.cephfs.compute-2.zycvef.service
           │ │ │ ├─libpod-payload-28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63
           │ │ │ │ ├─81152 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-2.zycvef -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─81154 /usr/bin/ceph-mds -n mds.cephfs.compute-2.zycvef -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─81150 /usr/bin/conmon --api-version 1 -c 28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63 -u 28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63/userdata -p /run/containers/storage/overlay-containers/28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-mds-cephfs-compute-2-zycvef --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@mds.cephfs.compute-2.zycvef.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63
           │ │ ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@mgr.compute-2.tjdsdx.service
           │ │ │ ├─libpod-payload-3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f
           │ │ │ │ ├─77436 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-2.tjdsdx -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─77438 /usr/bin/ceph-mgr -n mgr.compute-2.tjdsdx -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─77434 /usr/bin/conmon --api-version 1 -c 3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f -u 3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f/userdata -p /run/containers/storage/overlay-containers/3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-mgr-compute-2-tjdsdx --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@mgr.compute-2.tjdsdx.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f
           │ │ ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@mon.compute-2.service
           │ │ │ ├─libpod-payload-ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6
           │ │ │ │ ├─77079 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ │ └─77081 /usr/bin/ceph-mon -n mon.compute-2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─77077 /usr/bin/conmon --api-version 1 -c ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6 -u ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6/userdata -p /run/containers/storage/overlay-containers/ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-mon-compute-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@mon.compute-2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6
           │ │ ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@osd.2.service
           │ │ │ ├─libpod-payload-1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d
           │ │ │ │ ├─79777 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─79779 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─79775 /usr/bin/conmon --api-version 1 -c 1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d -u 1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d/userdata -p /run/containers/storage/overlay-containers/1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d
           │ │ └─ceph-088fe176-0106-5401-803c-2da38b73b76a@rgw.rgw.compute-2.gfsxzw.service
           │ │   ├─libpod-payload-49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0
           │ │   │ ├─80767 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-2.gfsxzw -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │   │ └─80769 /usr/bin/radosgw -n client.rgw.rgw.compute-2.gfsxzw -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │   └─runtime
           │ │     └─80765 /usr/bin/conmon --api-version 1 -c 49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0 -u 49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0/userdata -p /run/containers/storage/overlay-containers/49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-rgw-rgw-compute-2-gfsxzw --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@rgw.rgw.compute-2.gfsxzw.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0
           │ ├─system-getty.slice
           │ │ └─getty@tty1.service
           │ │   └─1007 /sbin/agetty -o "-p -- \\u" --noclear - linux
           │ ├─system-serial\x2dgetty.slice
           │ │ └─serial-getty@ttyS0.service
           │ │   └─1009 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
           │ ├─systemd-hostnamed.service
           │ │ └─294012 /usr/lib/systemd/systemd-hostnamed
           │ ├─systemd-journald.service
           │ │ └─675 /usr/lib/systemd/systemd-journald
           │ ├─systemd-logind.service
           │ │ └─787 /usr/lib/systemd/systemd-logind
           │ ├─systemd-machined.service
           │ │ └─194970 /usr/lib/systemd/systemd-machined
           │ ├─systemd-udevd.service
           │ │ └─udev
           │ │   └─727 /usr/lib/systemd/systemd-udevd
           │ ├─tuned.service
           │ │ └─92450 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
           │ ├─virtlogd.service
           │ │ └─194338 /usr/sbin/virtlogd
           │ ├─virtnodedevd.service
           │ │ └─226244 /usr/sbin/virtnodedevd --timeout 120
           │ ├─virtqemud.service
           │ │ └─225907 /usr/sbin/virtqemud --timeout 120
           │ └─virtsecretd.service
           │   └─248555 /usr/sbin/virtsecretd --timeout 120
           └─user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4517 /usr/bin/python3
             │ ├─session-51.scope
             │ │ ├─291623 "sshd-session: zuul [priv]"
             │ │ ├─291627 "sshd-session: zuul@notty"
             │ │ ├─291628 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─291652 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─297596 timeout 15s turbostat --debug sleep 10
             │ │ ├─298073 timeout 300s ceph osd pool autoscale-status --format json-pretty
             │ │ ├─298074 /usr/bin/python3 -s /usr/bin/ceph osd pool autoscale-status --format json-pretty
             │ │ ├─298090 timeout 300s ipcs
             │ │ ├─298091 timeout 300s systemctl status --all
             │ │ ├─298092 timeout 300s ipcs
             │ │ └─298093 systemctl status --all
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─11936 /usr/bin/dbus-broker-launch --scope user
             │   │   └─11948 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4305 /usr/lib/systemd/systemd --user
             │   │ └─4307 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-3b1c51bd.scope
             │       └─11838 catatonit -P
             └─user-42477.slice
               ├─session-20.scope
               │ ├─72606 "sshd-session: ceph-admin [priv]"
               │ └─72628 "sshd-session: ceph-admin"
               ├─session-22.scope
               │ ├─72623 "sshd-session: ceph-admin [priv]"
               │ └─72629 "sshd-session: ceph-admin@notty"
               ├─session-23.scope
               │ ├─72680 "sshd-session: ceph-admin [priv]"
               │ └─72683 "sshd-session: ceph-admin@notty"
               ├─session-24.scope
               │ ├─72734 "sshd-session: ceph-admin [priv]"
               │ └─72737 "sshd-session: ceph-admin@notty"
               ├─session-25.scope
               │ ├─72788 "sshd-session: ceph-admin [priv]"
               │ └─72791 "sshd-session: ceph-admin@notty"
               ├─session-26.scope
               │ ├─72842 "sshd-session: ceph-admin [priv]"
               │ └─72845 "sshd-session: ceph-admin@notty"
               ├─session-27.scope
               │ ├─72896 "sshd-session: ceph-admin [priv]"
               │ └─72899 "sshd-session: ceph-admin@notty"
               ├─session-28.scope
               │ ├─72950 "sshd-session: ceph-admin [priv]"
               │ └─72953 "sshd-session: ceph-admin@notty"
               ├─session-29.scope
               │ ├─73004 "sshd-session: ceph-admin [priv]"
               │ └─73007 "sshd-session: ceph-admin@notty"
               ├─session-30.scope
               │ ├─73058 "sshd-session: ceph-admin [priv]"
               │ └─73061 "sshd-session: ceph-admin@notty"
               ├─session-31.scope
               │ ├─73085 "sshd-session: ceph-admin [priv]"
                │ └─73088 "sshd-session: ceph-admin@notty"
Unit boot.automount could not be found.
               ├─session-32.scope
               │ ├─73139 "sshd-session: ceph-admin [priv]"
               │ └─73142 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─72610 /usr/lib/systemd/systemd --user
                   └─72612 "(sd-pam)"

● proc-sys-fs-binfmt_misc.automount - Arbitrary Executable File Formats File System Automount Point
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.automount; static)
     Active: active (running) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
   Triggers: ● proc-sys-fs-binfmt_misc.mount
      Where: /proc/sys/fs/binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Jan 22 13:34:37 compute-2 systemd[1]: proc-sys-fs-binfmt_misc.automount: Got automount request for /proc/sys/fs/binfmt_misc, triggered by 73633 (sysctl)

● dev-cdrom.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ceph_vg0-ceph_lv0.device - /dev/ceph_vg0/ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:31:00 UTC; 2h 15min ago
      Until: Thu 2026-01-22 13:31:00 UTC; 2h 15min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2ddiskseq-1.device - /dev/disk/by-diskseq/1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2ddiskseq-3.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2ddiskseq-5.device - /dev/disk/by-diskseq/5
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:30:59 UTC; 2h 15min ago
      Until: Thu 2026-01-22 13:30:59 UTC; 2h 15min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2ddiskseq-6.device - /dev/disk/by-diskseq/6
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:31:00 UTC; 2h 15min ago
      Until: Thu 2026-01-22 13:31:00 UTC; 2h 15min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-ata\x2dQEMU_DVD\x2dROM_QM00001.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg0\x2dceph_lv0.device - /dev/disk/by-id/dm-name-ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:31:00 UTC; 2h 15min ago
      Until: Thu 2026-01-22 13:31:00 UTC; 2h 15min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2dTbs7Mx0tGJBy5eUqgOw94dy5L4cwOVHZjEocwvccRDGQ8s06tXi7z2rzc0cFSAk3.device - /dev/disk/by-id/dm-uuid-LVM-Tbs7Mx0tGJBy5eUqgOw94dy5L4cwOVHZjEocwvccRDGQ8s06tXi7z2rzc0cFSAk3
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:31:00 UTC; 2h 15min ago
      Until: Thu 2026-01-22 13:31:00 UTC; 2h 15min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2d4eMdqo\x2d8tCe\x2d3u3z\x2d7D1p\x2dB5Na\x2dHpbr\x2dS814FL.device - /dev/disk/by-id/lvm-pv-uuid-4eMdqo-8tCe-3u3z-7D1p-B5Na-Hpbr-S814FL
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:31:00 UTC; 2h 15min ago
      Until: Thu 2026-01-22 13:31:00 UTC; 2h 15min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2dlabel-config\x2d2.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpartuuid-97d3b354\x2d01.device - /dev/disk/by-partuuid/97d3b354-01
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0.device - /dev/disk/by-path/pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0.device - /dev/disk/by-path/virtio-pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/virtio-pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2duuid-2026\x2d01\x2d22\x2d12\x2d49\x2d00\x2d00.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2duuid-22ac9141\x2d3960\x2d4912\x2db20e\x2d19fc8a328d40.device - /dev/disk/by-uuid/22ac9141-3960-4912-b20e-19fc8a328d40
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

Jan 22 12:49:12 localhost systemd[1]: Found device /dev/disk/by-uuid/22ac9141-3960-4912-b20e-19fc8a328d40.

● dev-dm\x2d0.device - /dev/dm-0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:31:00 UTC; 2h 15min ago
      Until: Thu 2026-01-22 13:31:00 UTC; 2h 15min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-loop3.device - /dev/loop3
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:30:59 UTC; 2h 15min ago
      Until: Thu 2026-01-22 13:30:59 UTC; 2h 15min ago
     Device: /sys/devices/virtual/block/loop3

● dev-mapper-ceph_vg0\x2dceph_lv0.device - /dev/mapper/ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:31:00 UTC; 2h 15min ago
      Until: Thu 2026-01-22 13:31:00 UTC; 2h 15min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-rfkill.device - /dev/rfkill
    Follows: unit currently follows state of sys-devices-virtual-misc-rfkill.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:16 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:16 UTC; 2h 56min ago
     Device: /sys/devices/virtual/misc/rfkill

● dev-sr0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ttyS0.device - /dev/ttyS0
    Follows: unit currently follows state of sys-devices-pnp0-00:00-tty-ttyS0.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

Jan 22 12:49:15 localhost systemd[1]: Condition check resulted in /dev/ttyS0 being skipped.

● dev-ttyS1.device - /dev/ttyS1
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS1.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● dev-ttyS2.device - /dev/ttyS2
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS2.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● dev-ttyS3.device - /dev/ttyS3
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS3.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● dev-vda.device - /dev/vda
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-vda1.device - /dev/vda1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device - QEMU_DVD-ROM config-2
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● sys-devices-pci0000:00-0000:00:03.0-virtio1-net-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● sys-devices-pci0000:00-0000:00:07.0-virtio5-net-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:53:20 UTC; 2h 52min ago
      Until: Thu 2026-01-22 12:53:20 UTC; 2h 52min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-devices-platform-serial8250-tty-ttyS1.device - /sys/devices/platform/serial8250/tty/ttyS1
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● sys-devices-platform-serial8250-tty-ttyS2.device - /sys/devices/platform/serial8250/tty/ttyS2
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● sys-devices-platform-serial8250-tty-ttyS3.device - /sys/devices/platform/serial8250/tty/ttyS3
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● sys-devices-pnp0-00:00-tty-ttyS0.device - /sys/devices/pnp0/00:00/tty/ttyS0
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

● sys-devices-virtual-block-dm\x2d0.device - /sys/devices/virtual/block/dm-0
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:31:00 UTC; 2h 15min ago
      Until: Thu 2026-01-22 13:31:00 UTC; 2h 15min ago
     Device: /sys/devices/virtual/block/dm-0

● sys-devices-virtual-block-loop3.device - /sys/devices/virtual/block/loop3
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:30:59 UTC; 2h 15min ago
      Until: Thu 2026-01-22 13:30:59 UTC; 2h 15min ago
     Device: /sys/devices/virtual/block/loop3

● sys-devices-virtual-misc-rfkill.device - /sys/devices/virtual/misc/rfkill
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:16 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:16 UTC; 2h 56min ago
     Device: /sys/devices/virtual/misc/rfkill

● sys-devices-virtual-net-br\x2dex.device - /sys/devices/virtual/net/br-ex
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:27:24 UTC; 2h 18min ago
      Until: Thu 2026-01-22 13:27:24 UTC; 2h 18min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-devices-virtual-net-br\x2dint.device - /sys/devices/virtual/net/br-int
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:45:28 UTC; 2h 0min ago
      Until: Thu 2026-01-22 13:45:28 UTC; 2h 0min ago
     Device: /sys/devices/virtual/net/br-int

● sys-devices-virtual-net-genev_sys_6081.device - /sys/devices/virtual/net/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:45:28 UTC; 2h 0min ago
      Until: Thu 2026-01-22 13:45:28 UTC; 2h 0min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-devices-virtual-net-ovs\x2dsystem.device - /sys/devices/virtual/net/ovs-system
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:27:24 UTC; 2h 18min ago
      Until: Thu 2026-01-22 13:27:24 UTC; 2h 18min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-devices-virtual-net-tape581f563\x2d33.device - /sys/devices/virtual/net/tape581f563-33
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 14:34:15 UTC; 1h 11min ago
      Until: Thu 2026-01-22 14:34:15 UTC; 1h 11min ago
     Device: /sys/devices/virtual/net/tape581f563-33

● sys-devices-virtual-net-vlan20.device - /sys/devices/virtual/net/vlan20
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:27:25 UTC; 2h 18min ago
      Until: Thu 2026-01-22 13:27:25 UTC; 2h 18min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-devices-virtual-net-vlan21.device - /sys/devices/virtual/net/vlan21
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:27:25 UTC; 2h 18min ago
      Until: Thu 2026-01-22 13:27:25 UTC; 2h 18min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-devices-virtual-net-vlan22.device - /sys/devices/virtual/net/vlan22
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:27:25 UTC; 2h 18min ago
      Until: Thu 2026-01-22 13:27:25 UTC; 2h 18min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-devices-virtual-net-vlan23.device - /sys/devices/virtual/net/vlan23
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:27:25 UTC; 2h 18min ago
      Until: Thu 2026-01-22 13:27:25 UTC; 2h 18min ago
     Device: /sys/devices/virtual/net/vlan23

● sys-module-configfs.device - /sys/module/configfs
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/module/configfs

● sys-module-fuse.device - /sys/module/fuse
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/module/fuse

● sys-subsystem-net-devices-br\x2dex.device - /sys/subsystem/net/devices/br-ex
Unit boot.mount could not be found.
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:27:24 UTC; 2h 18min ago
      Until: Thu 2026-01-22 13:27:24 UTC; 2h 18min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-subsystem-net-devices-br\x2dint.device - /sys/subsystem/net/devices/br-int
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:45:28 UTC; 2h 0min ago
      Until: Thu 2026-01-22 13:45:28 UTC; 2h 0min ago
     Device: /sys/devices/virtual/net/br-int

● sys-subsystem-net-devices-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-subsystem-net-devices-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 12:53:20 UTC; 2h 52min ago
      Until: Thu 2026-01-22 12:53:20 UTC; 2h 52min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-subsystem-net-devices-genev_sys_6081.device - /sys/subsystem/net/devices/genev_sys_6081
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:45:28 UTC; 2h 0min ago
      Until: Thu 2026-01-22 13:45:28 UTC; 2h 0min ago
     Device: /sys/devices/virtual/net/genev_sys_6081

● sys-subsystem-net-devices-ovs\x2dsystem.device - /sys/subsystem/net/devices/ovs-system
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:27:24 UTC; 2h 18min ago
      Until: Thu 2026-01-22 13:27:24 UTC; 2h 18min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-subsystem-net-devices-tape581f563\x2d33.device - /sys/subsystem/net/devices/tape581f563-33
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 14:34:15 UTC; 1h 11min ago
      Until: Thu 2026-01-22 14:34:15 UTC; 1h 11min ago
     Device: /sys/devices/virtual/net/tape581f563-33

● sys-subsystem-net-devices-vlan20.device - /sys/subsystem/net/devices/vlan20
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:27:25 UTC; 2h 18min ago
      Until: Thu 2026-01-22 13:27:25 UTC; 2h 18min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-subsystem-net-devices-vlan21.device - /sys/subsystem/net/devices/vlan21
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:27:25 UTC; 2h 18min ago
      Until: Thu 2026-01-22 13:27:25 UTC; 2h 18min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-subsystem-net-devices-vlan22.device - /sys/subsystem/net/devices/vlan22
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:27:25 UTC; 2h 18min ago
      Until: Thu 2026-01-22 13:27:25 UTC; 2h 18min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-subsystem-net-devices-vlan23.device - /sys/subsystem/net/devices/vlan23
     Loaded: loaded
     Active: active (plugged) since Thu 2026-01-22 13:27:25 UTC; 2h 18min ago
      Until: Thu 2026-01-22 13:27:25 UTC; 2h 18min ago
     Device: /sys/devices/virtual/net/vlan23

● -.mount - Root Mount
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted)
      Where: /
       What: /dev/vda1
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages.mount - Huge Pages File System
     Loaded: loaded (/usr/lib/systemd/system/dev-hugepages.mount; static)
     Active: active (mounted) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Where: /dev/hugepages
       What: hugetlbfs
       Docs: https://docs.kernel.org/admin-guide/mm/hugetlbpage.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 52.0K (peak: 556.0K)
        CPU: 5ms
     CGroup: /dev-hugepages.mount

● dev-hugepages1G.mount - /dev/hugepages1G
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Thu 2026-01-22 13:29:55 UTC; 2h 16min ago
      Until: Thu 2026-01-22 13:29:55 UTC; 2h 16min ago
Unit home.mount could not be found.
      Where: /dev/hugepages1G
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages2M.mount - /dev/hugepages2M
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Thu 2026-01-22 13:29:56 UTC; 2h 16min ago
      Until: Thu 2026-01-22 13:29:56 UTC; 2h 16min ago
      Where: /dev/hugepages2M
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-mqueue.mount - POSIX Message Queue File System
     Loaded: loaded (/usr/lib/systemd/system/dev-mqueue.mount; static)
     Active: active (mounted) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Where: /dev/mqueue
       What: mqueue
       Docs: man:mq_overview(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 3ms
     CGroup: /dev-mqueue.mount

○ proc-fs-nfsd.mount - NFSD configuration filesystem
     Loaded: loaded (/usr/lib/systemd/system/proc-fs-nfsd.mount; static)
     Active: inactive (dead)
      Where: /proc/fs/nfsd
       What: nfsd

● proc-sys-fs-binfmt_misc.mount - Arbitrary Executable File Formats File System
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.mount; disabled; preset: disabled)
     Active: active (mounted) since Thu 2026-01-22 13:34:38 UTC; 2h 11min ago
      Until: Thu 2026-01-22 13:34:38 UTC; 2h 11min ago
TriggeredBy: ● proc-sys-fs-binfmt_misc.automount
      Where: /proc/sys/fs/binfmt_misc
       What: binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 8.0K (peak: 544.0K)
        CPU: 11ms
     CGroup: /proc-sys-fs-binfmt_misc.mount

Jan 22 13:34:38 compute-2 systemd[1]: Mounting Arbitrary Executable File Formats File System...
Jan 22 13:34:38 compute-2 systemd[1]: Mounted Arbitrary Executable File Formats File System.

● run-credentials-systemd\x2dsysctl.service.mount - /run/credentials/systemd-sysctl.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Thu 2026-01-22 13:25:58 UTC; 2h 20min ago
      Until: Thu 2026-01-22 13:25:58 UTC; 2h 20min ago
      Where: /run/credentials/systemd-sysctl.service
       What: none

● run-credentials-systemd\x2dsysusers.service.mount - /run/credentials/systemd-sysusers.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Where: /run/credentials/systemd-sysusers.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup.service.mount - /run/credentials/systemd-tmpfiles-setup.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Where: /run/credentials/systemd-tmpfiles-setup.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup\x2ddev.service.mount - /run/credentials/systemd-tmpfiles-setup-dev.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Where: /run/credentials/systemd-tmpfiles-setup-dev.service
       What: none

● run-netns.mount - /run/netns
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Thu 2026-01-22 13:29:12 UTC; 2h 16min ago
      Until: Thu 2026-01-22 13:29:12 UTC; 2h 16min ago
      Where: /run/netns
       What: tmpfs

● run-user-1000.mount - /run/user/1000
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Thu 2026-01-22 12:50:30 UTC; 2h 55min ago
      Until: Thu 2026-01-22 12:50:30 UTC; 2h 55min ago
      Where: /run/user/1000
Unit sysroot.mount could not be found.
       What: tmpfs

● run-user-42477.mount - /run/user/42477
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Thu 2026-01-22 13:33:33 UTC; 2h 12min ago
      Until: Thu 2026-01-22 13:33:33 UTC; 2h 12min ago
      Where: /run/user/42477
       What: tmpfs

● sys-fs-fuse-connections.mount - FUSE Control File System
     Loaded: loaded (/usr/lib/systemd/system/sys-fs-fuse-connections.mount; static)
     Active: active (mounted) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Where: /sys/fs/fuse/connections
       What: fusectl
       Docs: https://docs.kernel.org/filesystems/fuse.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 2ms
     CGroup: /sys-fs-fuse-connections.mount

Jan 22 12:49:14 localhost systemd[1]: Mounting FUSE Control File System...
Jan 22 12:49:14 localhost systemd[1]: Mounted FUSE Control File System.

● sys-kernel-config.mount - Kernel Configuration File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Where: /sys/kernel/config
       What: configfs
       Docs: https://docs.kernel.org/filesystems/configfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● sys-kernel-debug-tracing.mount - /sys/kernel/debug/tracing
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Thu 2026-01-22 15:45:35 UTC; 36s ago
      Until: Thu 2026-01-22 15:45:35 UTC; 36s ago
      Where: /sys/kernel/debug/tracing
       What: tracefs

● sys-kernel-debug.mount - Kernel Debug File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-debug.mount; static)
     Active: active (mounted) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Where: /sys/kernel/debug
       What: debugfs
       Docs: https://docs.kernel.org/filesystems/debugfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 3ms
     CGroup: /sys-kernel-debug.mount

● sys-kernel-tracing.mount - Kernel Trace File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-tracing.mount; static)
     Active: active (mounted) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Where: /sys/kernel/tracing
       What: tracefs
       Docs: https://docs.kernel.org/trace/ftrace.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 540.0K)
        CPU: 3ms
     CGroup: /sys-kernel-tracing.mount

○ tmp.mount - Temporary Directory /tmp
     Loaded: loaded (/usr/lib/systemd/system/tmp.mount; disabled; preset: disabled)
     Active: inactive (dead)
      Where: /tmp
       What: tmpfs
       Docs: https://systemd.io/TEMPORARY_DIRECTORIES
             man:file-hierarchy(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● var-lib-containers-storage-overlay-1734721b55cc982c684897978a32ef7483dd133591a02eac7552c372dda4a22e-merged.mount - /var/lib/containers/storage/overlay/1734721b55cc982c684897978a32ef7483dd133591a02eac7552c372dda4a22e/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Thu 2026-01-22 13:45:27 UTC; 2h 0min ago
      Until: Thu 2026-01-22 13:45:27 UTC; 2h 0min ago
      Where: /var/lib/containers/storage/overlay/1734721b55cc982c684897978a32ef7483dd133591a02eac7552c372dda4a22e/merged
       What: overlay

● var-lib-containers-storage-overlay-35c0630db1aa3168f009364b4e271af26cc7d640ab40f4aa8151f0310302f5b9-merged.mount - /var/lib/containers/storage/overlay/35c0630db1aa3168f009364b4e271af26cc7d640ab40f4aa8151f0310302f5b9/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Thu 2026-01-22 13:36:37 UTC; 2h 9min ago
      Until: Thu 2026-01-22 13:36:37 UTC; 2h 9min ago
      Where: /var/lib/containers/storage/overlay/35c0630db1aa3168f009364b4e271af26cc7d640ab40f4aa8151f0310302f5b9/merged
       What: overlay

● var-lib-containers-storage-overlay-6269f9312632c62e86d13c965ce5e4ccf9b1ba9a87f9e29364ed084fe61c1572-merged.mount - /var/lib/containers/storage/overlay/6269f9312632c62e86d13c965ce5e4ccf9b1ba9a87f9e29364ed084fe61c1572/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Thu 2026-01-22 13:35:29 UTC; 2h 10min ago
      Until: Thu 2026-01-22 13:35:29 UTC; 2h 10min ago
      Where: /var/lib/containers/storage/overlay/6269f9312632c62e86d13c965ce5e4ccf9b1ba9a87f9e29364ed084fe61c1572/merged
       What: overlay

● var-lib-containers-storage-overlay-8245524d960b7a932b934d051adb52667e2f74f47a73b2cef671a61a33d93cae-merged.mount - /var/lib/containers/storage/overlay/8245524d960b7a932b934d051adb52667e2f74f47a73b2cef671a61a33d93cae/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Thu 2026-01-22 13:35:49 UTC; 2h 10min ago
      Until: Thu 2026-01-22 13:35:49 UTC; 2h 10min ago
      Where: /var/lib/containers/storage/overlay/8245524d960b7a932b934d051adb52667e2f74f47a73b2cef671a61a33d93cae/merged
       What: overlay

● var-lib-containers-storage-overlay-a358ca8d9286b2c87ed8309fad35a1ad1ec5603e0132fed2f4d7473a5334162f-merged.mount - /var/lib/containers/storage/overlay/a358ca8d9286b2c87ed8309fad35a1ad1ec5603e0132fed2f4d7473a5334162f/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Thu 2026-01-22 13:37:44 UTC; 2h 8min ago
      Until: Thu 2026-01-22 13:37:44 UTC; 2h 8min ago
      Where: /var/lib/containers/storage/overlay/a358ca8d9286b2c87ed8309fad35a1ad1ec5603e0132fed2f4d7473a5334162f/merged
       What: overlay

● var-lib-containers-storage-overlay-a7af02108f933d0bcb8c89c30d24a97786ef6bd18fd90154e0884f5f96987649-merged.mount - /var/lib/containers/storage/overlay/a7af02108f933d0bcb8c89c30d24a97786ef6bd18fd90154e0884f5f96987649/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Thu 2026-01-22 13:36:49 UTC; 2h 9min ago
      Until: Thu 2026-01-22 13:36:49 UTC; 2h 9min ago
      Where: /var/lib/containers/storage/overlay/a7af02108f933d0bcb8c89c30d24a97786ef6bd18fd90154e0884f5f96987649/merged
       What: overlay

● var-lib-containers-storage-overlay-aaba1a4c1446d779d6c3516cfd324aad6d83d7c423cfe84d48f1bb4f78328aa6-merged.mount - /var/lib/containers/storage/overlay/aaba1a4c1446d779d6c3516cfd324aad6d83d7c423cfe84d48f1bb4f78328aa6/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Thu 2026-01-22 13:35:43 UTC; 2h 10min ago
      Until: Thu 2026-01-22 13:35:43 UTC; 2h 10min ago
      Where: /var/lib/containers/storage/overlay/aaba1a4c1446d779d6c3516cfd324aad6d83d7c423cfe84d48f1bb4f78328aa6/merged
       What: overlay

● var-lib-containers-storage-overlay-bdd406aa7bdb74b2323a09e2995461363a4b1400f1ae42685b71b4e3d7c9a098-merged.mount - /var/lib/containers/storage/overlay/bdd406aa7bdb74b2323a09e2995461363a4b1400f1ae42685b71b4e3d7c9a098/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Thu 2026-01-22 13:36:27 UTC; 2h 9min ago
      Until: Thu 2026-01-22 13:36:27 UTC; 2h 9min ago
      Where: /var/lib/containers/storage/overlay/bdd406aa7bdb74b2323a09e2995461363a4b1400f1ae42685b71b4e3d7c9a098/merged
       What: overlay

● var-lib-containers-storage-overlay-c6c548d1f25210951fff7cdd77840abeaccd4dd3dbddfe66f57affb74e2fc25b-merged.mount - /var/lib/containers/storage/overlay/c6c548d1f25210951fff7cdd77840abeaccd4dd3dbddfe66f57affb74e2fc25b/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Thu 2026-01-22 13:55:22 UTC; 1h 50min ago
      Until: Thu 2026-01-22 13:55:22 UTC; 1h 50min ago
      Where: /var/lib/containers/storage/overlay/c6c548d1f25210951fff7cdd77840abeaccd4dd3dbddfe66f57affb74e2fc25b/merged
       What: overlay

● var-lib-containers-storage-overlay-e4b9657b1dcd91b4246a3241bc74c99303fc9f2fa9d335018691a9ddb1987399-merged.mount - /var/lib/containers/storage/overlay/e4b9657b1dcd91b4246a3241bc74c99303fc9f2fa9d335018691a9ddb1987399/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Thu 2026-01-22 13:46:45 UTC; 1h 59min ago
      Until: Thu 2026-01-22 13:46:45 UTC; 1h 59min ago
      Where: /var/lib/containers/storage/overlay/e4b9657b1dcd91b4246a3241bc74c99303fc9f2fa9d335018691a9ddb1987399/merged
       What: overlay

● var-lib-containers-storage-overlay-eb07ffd7b803d428dbe6adac05a87d7037dad80cef11765c51e4ad5be67c2ac1-merged.mount - /var/lib/containers/storage/overlay/eb07ffd7b803d428dbe6adac05a87d7037dad80cef11765c51e4ad5be67c2ac1/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Thu 2026-01-22 13:37:28 UTC; 2h 8min ago
      Until: Thu 2026-01-22 13:37:28 UTC; 2h 8min ago
      Where: /var/lib/containers/storage/overlay/eb07ffd7b803d428dbe6adac05a87d7037dad80cef11765c51e4ad5be67c2ac1/merged
       What: overlay

● var-lib-containers-storage-overlay.mount - /var/lib/containers/storage/overlay
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Thu 2026-01-22 13:35:28 UTC; 2h 10min ago
      Until: Thu 2026-01-22 13:35:28 UTC; 2h 10min ago
      Where: /var/lib/containers/storage/overlay
       What: /dev/vda1

● var-lib-containers-storage-overlay\x2dcontainers-572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Thu 2026-01-22 13:55:22 UTC; 1h 50min ago
      Until: Thu 2026-01-22 13:55:22 UTC; 1h 50min ago
      Where: /var/lib/containers/storage/overlay-containers/572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Thu 2026-01-22 13:46:45 UTC; 1h 59min ago
      Until: Thu 2026-01-22 13:46:45 UTC; 1h 59min ago
      Where: /var/lib/containers/storage/overlay-containers/65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d/userdata/shm
       What: shm

● var-lib-containers-storage-overlay\x2dcontainers-8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356-userdata-shm.mount - /var/lib/containers/storage/overlay-containers/8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356/userdata/shm
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Thu 2026-01-22 13:45:27 UTC; 2h 0min ago
      Until: Thu 2026-01-22 13:45:27 UTC; 2h 0min ago
      Where: /var/lib/containers/storage/overlay-containers/8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356/userdata/shm
       What: shm

○ var-lib-machines.mount - Virtual Machine and Container Storage (Compatibility)
     Loaded: loaded (/usr/lib/systemd/system/var-lib-machines.mount; static)
     Active: inactive (dead)
  Condition: start condition failed at Thu 2026-01-22 13:51:24 UTC; 1h 54min ago
      Where: /var/lib/machines
       What: /var/lib/machines.raw

Jan 22 13:51:24 compute-2 systemd[1]: Virtual Machine and Container Storage (Compatibility) was skipped because of an unmet condition check (ConditionPathExists=/var/lib/machines.raw).

● var-lib-nfs-rpc_pipefs.mount - RPC Pipe File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Where: /var/lib/nfs/rpc_pipefs
       What: rpc_pipefs

● systemd-ask-password-console.path - Dispatch Password Requests to Console Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.path; static)
     Active: active (waiting) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
   Triggers: ● systemd-ask-password-console.service
       Docs: man:systemd-ask-password-console.path(8)

● systemd-ask-password-wall.path - Forward Password Requests to Wall Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.path; static)
     Active: active (waiting) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
   Triggers: ● systemd-ask-password-wall.service
       Docs: man:systemd-ask-password-wall.path(8)

● init.scope - System and Service Manager
     Loaded: loaded
  Transient: yes
     Active: active (running) since Thu 2026-01-22 12:49:11 UTC; 2h 57min ago
       Docs: man:systemd(1)
         IO: 844.0K read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 47.5M (peak: 66.4M)
        CPU: 1min 2.303s
     CGroup: /init.scope
             └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31

Jan 22 15:39:23 compute-2 systemd[1]: libpod-conmon-7388ce5ee3d99173f70197fceb574b7daa841b8d9bb8a2d748a9c53909dc30fd.scope: Deactivated successfully.
Jan 22 15:39:23 compute-2 systemd[1]: Started libpod-conmon-4fc8cc84139abc258a816c26eaa0a142ddd799f3b381fccee791026af3a708db.scope.
Jan 22 15:39:23 compute-2 systemd[1]: Started libcrun container.
Jan 22 15:39:25 compute-2 systemd[1]: libpod-4fc8cc84139abc258a816c26eaa0a142ddd799f3b381fccee791026af3a708db.scope: Deactivated successfully.
Jan 22 15:39:25 compute-2 systemd[1]: libpod-4fc8cc84139abc258a816c26eaa0a142ddd799f3b381fccee791026af3a708db.scope: Consumed 1.361s CPU time.
Jan 22 15:39:25 compute-2 systemd[1]: var-lib-containers-storage-overlay-c8b1142ccbf335480b995577fe7d87f8df451a3753a1aab61efbc6016c18fc4a-merged.mount: Deactivated successfully.
Jan 22 15:39:25 compute-2 systemd[1]: libpod-conmon-4fc8cc84139abc258a816c26eaa0a142ddd799f3b381fccee791026af3a708db.scope: Deactivated successfully.
Jan 22 15:45:29 compute-2 systemd[1]: Started Session 51 of User zuul.
Jan 22 15:45:49 compute-2 systemd[1]: Starting Hostname Service...
Jan 22 15:45:49 compute-2 systemd[1]: Started Hostname Service.

● libpod-572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649.scope; transient)
  Transient: yes
     Active: active (running) since Thu 2026-01-22 13:55:22 UTC; 1h 50min ago
         IO: 56.1M read, 83.8M written
      Tasks: 149 (limit: 4096)
     Memory: 808.6M (peak: 866.3M)
        CPU: 1min 36.200s
     CGroup: /machine.slice/libpod-572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649.scope
             └─container
               ├─226435 dumb-init --single-child -- kolla_start
               ├─226437 /usr/bin/python3 /usr/bin/nova-compute
               ├─230058 rbd import --pool vms /var/lib/nova/instances/_base/389efd6047b99779d5161939afa4f2bdb261bfd0 e0e74330-96df-479f-8baf-53fbd2ccba91_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf
               ├─237484 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmpwbd1s1u6/privsep.sock
               ├─238396 rbd import --pool vms /var/lib/nova/instances/_base/389efd6047b99779d5161939afa4f2bdb261bfd0 f591d61b-712e-49aa-85bd-8d222b607eb3_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf
               ├─238793 rbd import --pool vms /var/lib/nova/instances/_base/389efd6047b99779d5161939afa4f2bdb261bfd0 87e798e6-6f00-4fe1-8412-75ddc9e2878e_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf
               ├─244616 rbd import --pool vms /var/lib/nova/instances/_base/389efd6047b99779d5161939afa4f2bdb261bfd0 8331b067-1b3f-4a1d-a596-e966f6de776a_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf
               ├─245897 rbd import --pool vms /var/lib/nova/instances/a0b3924b-4422-47c5-ba40-748e41b14d00/disk.config a0b3924b-4422-47c5-ba40-748e41b14d00_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf
               ├─248518 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmpk2q2e022/privsep.sock
               └─250095 rbd import --pool vms /var/lib/nova/instances/_base/389efd6047b99779d5161939afa4f2bdb261bfd0 001ba9a6-ba0c-438d-8150-5cfbcec3d34f_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf

Jan 22 13:55:22 compute-2 systemd[1]: Started libcrun container.

● libpod-65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d.scope.d
             └─dep.conf
     Active: active (running) since Thu 2026-01-22 13:46:45 UTC; 1h 59min ago
         IO: 19.6M read, 1.7M written
      Tasks: 10 (limit: 4096)
     Memory: 434.6M (peak: 477.7M)
        CPU: 31.117s
     CGroup: /machine.slice/libpod-65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d.scope
             └─container
               ├─143494 dumb-init --single-child -- kolla_start
               ├─143497 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─143757 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
               ├─143856 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp405dvk24/privsep.sock
               ├─237689 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmp_pg3kwj0/privsep.sock
               └─237788 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmp3y50ov6x/privsep.sock

Jan 22 14:29:03 compute-2 podman[248924]: 2026-01-22 14:29:03.769958997 +0000 UTC m=+0.060919766 container died 3f8d50ba790e2d05462a6a55fd8218af8632a807958c685028c074be3cd8b14b (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-b247a422-e88b-4d6e-9b42-d4947ce89ea4, io.buildah.version=1.41.3, org.label-schema.build-date=20251202, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, tcib_managed=true, org.label-schema.license=GPLv2, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb)
Jan 22 14:29:03 compute-2 podman[248924]: 2026-01-22 14:29:03.816821692 +0000 UTC m=+0.107782411 container cleanup 3f8d50ba790e2d05462a6a55fd8218af8632a807958c685028c074be3cd8b14b (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-b247a422-e88b-4d6e-9b42-d4947ce89ea4, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, io.buildah.version=1.41.3, org.label-schema.build-date=20251202, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team)
Jan 22 14:29:03 compute-2 podman[248971]: 2026-01-22 14:29:03.875268114 +0000 UTC m=+0.037427223 container remove 3f8d50ba790e2d05462a6a55fd8218af8632a807958c685028c074be3cd8b14b (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-b247a422-e88b-4d6e-9b42-d4947ce89ea4, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, tcib_managed=true, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251202, io.buildah.version=1.41.3, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb)
Jan 22 14:34:16 compute-2 podman[252613]: 2026-01-22 14:34:16.670319826 +0000 UTC m=+0.058805116 container create 43125dacd357b517e238cd06be25c2275d0954f87098ef055b4b9bef1e2b9857 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-e70febd3-9995-42cd-a322-30bf5db3445d, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, tcib_managed=true, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251202)
Jan 22 14:34:16 compute-2 podman[252613]: 2026-01-22 14:34:16.636628088 +0000 UTC m=+0.025113458 image pull 3695f0466b4af47afdf4b467956f8cc4744d7249671a73e7ca3fd26cca2f59c3 quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified
Jan 22 14:34:16 compute-2 podman[252613]: 2026-01-22 14:34:16.770376026 +0000 UTC m=+0.158861316 container init 43125dacd357b517e238cd06be25c2275d0954f87098ef055b4b9bef1e2b9857 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-e70febd3-9995-42cd-a322-30bf5db3445d, org.label-schema.build-date=20251202, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, tcib_managed=true, org.label-schema.vendor=CentOS)
Jan 22 14:34:16 compute-2 podman[252613]: 2026-01-22 14:34:16.782612145 +0000 UTC m=+0.171097435 container start 43125dacd357b517e238cd06be25c2275d0954f87098ef055b4b9bef1e2b9857 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-e70febd3-9995-42cd-a322-30bf5db3445d, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, org.label-schema.name=CentOS Stream 9 Base Image, maintainer=OpenStack Kubernetes Operator team, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, tcib_managed=true, org.label-schema.vendor=CentOS, org.label-schema.build-date=20251202, org.label-schema.license=GPLv2)
Jan 22 14:40:03 compute-2 podman[257497]: 2026-01-22 14:40:03.506984423 +0000 UTC m=+0.052948916 container died 43125dacd357b517e238cd06be25c2275d0954f87098ef055b4b9bef1e2b9857 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-e70febd3-9995-42cd-a322-30bf5db3445d, maintainer=OpenStack Kubernetes Operator team, org.label-schema.build-date=20251202, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, io.buildah.version=1.41.3, tcib_managed=true, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS)
Jan 22 14:40:03 compute-2 podman[257497]: 2026-01-22 14:40:03.561837007 +0000 UTC m=+0.107801470 container cleanup 43125dacd357b517e238cd06be25c2275d0954f87098ef055b4b9bef1e2b9857 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-e70febd3-9995-42cd-a322-30bf5db3445d, org.label-schema.name=CentOS Stream 9 Base Image, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, tcib_managed=true, org.label-schema.build-date=20251202, org.label-schema.license=GPLv2)
Jan 22 14:40:03 compute-2 podman[257524]: 2026-01-22 14:40:03.665447598 +0000 UTC m=+0.065696101 container remove 43125dacd357b517e238cd06be25c2275d0954f87098ef055b4b9bef1e2b9857 (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=neutron-haproxy-ovnmeta-e70febd3-9995-42cd-a322-30bf5db3445d, org.label-schema.license=GPLv2, tcib_managed=true, org.label-schema.build-date=20251202, org.label-schema.vendor=CentOS, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb)

● libpod-8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356.scope - libcrun container
     Loaded: loaded (/run/systemd/transient/libpod-8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356.scope; transient)
  Transient: yes
    Drop-In: /run/systemd/transient/libpod-8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356.scope.d
             └─dep.conf
     Active: active (running) since Thu 2026-01-22 13:45:27 UTC; 2h 0min ago
         IO: 7.4M read, 4.0K written
      Tasks: 6 (limit: 4096)
     Memory: 20.5M (peak: 24.4M)
        CPU: 16.591s
     CGroup: /machine.slice/libpod-8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356.scope
             └─container
               ├─133158 dumb-init --single-child -- kolla_start
               └─133161 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt

Jan 22 13:45:27 compute-2 systemd[1]: Started libcrun container.

● machine-qemu\x2d6\x2dinstance\x2d00000016.scope - Virtual Machine qemu-6-instance-00000016
     Loaded: loaded (/run/systemd/transient/machine-qemu\x2d6\x2dinstance\x2d00000016.scope; transient)
  Transient: yes
     Active: active (running) since Thu 2026-01-22 14:34:15 UTC; 1h 11min ago
         IO: 256.0K read, 0B written
      Tasks: 37 (limit: 16384)
     Memory: 277.4M (peak: 278.4M)
        CPU: 3min 33.896s
     CGroup: /machine.slice/machine-qemu\x2d6\x2dinstance\x2d00000016.scope
             └─libvirt
               └─252492 /usr/libexec/qemu-kvm -name guest=instance-00000016,debug-threads=on -S -object "{\"qom-type\":\"secret\",\"id\":\"masterKey0\",\"format\":\"raw\",\"file\":\"/var/lib/libvirt/qemu/domain-6-instance-00000016/master-key.aes\"}" -machine pc-q35-rhel9.8.0,usb=off,dump-guest-core=off,memory-backend=pc.ram,hpet=off,acpi=on -accel kvm -cpu Nehalem -m size=131072k -object "{\"qom-type\":\"memory-backend-ram\",\"id\":\"pc.ram\",\"size\":134217728}" -overcommit mem-lock=off -smp 1,sockets=1,dies=1,clusters=1,cores=1,threads=1 -uuid 839e8e64-64a9-4e35-85dd-cdbb7f8e71c5 -smbios "type=1,manufacturer=RDO,product=OpenStack Compute,version=27.5.2-0.20250829104910.6f8decf.el9,serial=839e8e64-64a9-4e35-85dd-cdbb7f8e71c5,uuid=839e8e64-64a9-4e35-85dd-cdbb7f8e71c5,family=Virtual Machine" -no-user-config -nodefaults -chardev socket,id=charmonitor,fd=26,server=on,wait=off -mon chardev=charmonitor,id=monitor,mode=control -rtc base=utc,driftfix=slew -global kvm-pit.lost_tick_policy=delay -no-shutdown -boot strict=on -device "{\"driver\":\"pcie-root-port\",\"port\":16,\"chassis\":1,\"id\":\"pci.1\",\"bus\":\"pcie.0\",\"multifunction\":true,\"addr\":\"0x2\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":17,\"chassis\":2,\"id\":\"pci.2\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x1\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":18,\"chassis\":3,\"id\":\"pci.3\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x2\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":19,\"chassis\":4,\"id\":\"pci.4\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x3\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":20,\"chassis\":5,\"id\":\"pci.5\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x4\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":21,\"chassis\":6,\"id\":\"pci.6\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x5\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":22,\"chassis\":7,\"id\":\"pci.7\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x6\"}" -device 
"{\"driver\":\"pcie-root-port\",\"port\":23,\"chassis\":8,\"id\":\"pci.8\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x7\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":24,\"chassis\":9,\"id\":\"pci.9\",\"bus\":\"pcie.0\",\"multifunction\":true,\"addr\":\"0x3\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":25,\"chassis\":10,\"id\":\"pci.10\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x1\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":26,\"chassis\":11,\"id\":\"pci.11\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x2\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":27,\"chassis\":12,\"id\":\"pci.12\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x3\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":28,\"chassis\":13,\"id\":\"pci.13\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x4\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":29,\"chassis\":14,\"id\":\"pci.14\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x5\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":30,\"chassis\":15,\"id\":\"pci.15\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x6\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":31,\"chassis\":16,\"id\":\"pci.16\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x7\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":32,\"chassis\":17,\"id\":\"pci.17\",\"bus\":\"pcie.0\",\"multifunction\":true,\"addr\":\"0x4\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":33,\"chassis\":18,\"id\":\"pci.18\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x1\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":34,\"chassis\":19,\"id\":\"pci.19\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x2\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":35,\"chassis\":20,\"id\":\"pci.20\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x3\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":36,\"chassis\":21,\"id\":\"pci.21\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x4\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":37,\"chassis\":22,\"id\":\"pci.22\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x5\"}" -device 
"{\"driver\":\"pcie-root-port\",\"port\":38,\"chassis\":23,\"id\":\"pci.23\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x6\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":39,\"chassis\":24,\"id\":\"pci.24\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x7\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":40,\"chassis\":25,\"id\":\"pci.25\",\"bus\":\"pcie.0\",\"addr\":\"0x5\"}" -device "{\"driver\":\"pcie-pci-bridge\",\"id\":\"pci.26\",\"bus\":\"pci.1\",\"addr\":\"0x0\"}" -device "{\"driver\":\"piix3-usb-uhci\",\"id\":\"usb\",\"bus\":\"pci.26\",\"addr\":\"0x1\"}" -device "{\"driver\":\"virtio-scsi-pci\",\"id\":\"scsi0\",\"bus\":\"pci.3\",\"addr\":\"0x0\"}" -object "{\"qom-type\":\"secret\",\"id\":\"libvirt-2-storage-auth-secret0\",\"data\":\"ckHvapQG84cP5zoSqf7m1gCY4qTASyRQDTUqj+xCcWI=\",\"keyid\":\"masterKey0\",\"iv\":\"qG5IcOgwSBm0Z7uTjw3pxQ==\",\"format\":\"base64\"}" -blockdev "{\"driver\":\"rbd\",\"pool\":\"vms\",\"image\":\"839e8e64-64a9-4e35-85dd-cdbb7f8e71c5_disk\",\"server\":[{\"host\":\"192.168.122.100\",\"port\":\"6789\"},{\"host\":\"192.168.122.102\",\"port\":\"6789\"},{\"host\":\"192.168.122.101\",\"port\":\"6789\"}],\"user\":\"openstack\",\"auth-client-required\":[\"cephx\",\"none\"],\"key-secret\":\"libvirt-2-storage-auth-secret0\",\"node-name\":\"libvirt-2-storage\",\"read-only\":false,\"cache\":{\"direct\":true,\"no-flush\":false}}" -device "{\"driver\":\"scsi-hd\",\"bus\":\"scsi0.0\",\"channel\":0,\"scsi-id\":0,\"lun\":0,\"device_id\":\"drive-scsi0-0-0-0\",\"drive\":\"libvirt-2-storage\",\"id\":\"scsi0-0-0-0\",\"bootindex\":1,\"write-cache\":\"on\"}" -object "{\"qom-type\":\"secret\",\"id\":\"libvirt-1-storage-auth-secret0\",\"data\":\"us/rqTgpkEOPr1e80IYYxNi8jF+jipoTDjYt15m4hho=\",\"keyid\":\"masterKey0\",\"iv\":\"aOqQCrTLnlRcP7tTW3+8PQ==\",\"format\":\"base64\"}" -blockdev 
"{\"driver\":\"rbd\",\"pool\":\"vms\",\"image\":\"839e8e64-64a9-4e35-85dd-cdbb7f8e71c5_disk.config\",\"server\":[{\"host\":\"192.168.122.100\",\"port\":\"6789\"},{\"host\":\"192.168.122.102\",\"port\":\"6789\"},{\"host\":\"192.168.122.101\",\"port\":\"6789\"}],\"user\":\"openstack\",\"auth-client-required\":[\"cephx\",\"none\"],\"key-secret\":\"libvirt-1-storage-auth-secret0\",\"node-name\":\"libvirt-1-storage\",\"read-only\":true,\"cache\":{\"direct\":true,\"no-flush\":false}}" -device "{\"driver\":\"scsi-cd\",\"bus\":\"scsi0.0\",\"channel\":0,\"scsi-id\":0,\"lun\":1,\"device_id\":\"drive-scsi0-0-0-1\",\"drive\":\"libvirt-1-storage\",\"id\":\"scsi0-0-0-1\",\"write-cache\":\"on\"}" -netdev "{\"type\":\"tap\",\"fd\":\"28\",\"vhost\":true,\"vhostfd\":\"30\",\"id\":\"hostnet0\"}" -device "{\"driver\":\"virtio-net-pci\",\"rx_queue_size\":512,\"host_mtu\":1442,\"netdev\":\"hostnet0\",\"id\":\"net0\",\"mac\":\"fa:16:3e:35:f2:b5\",\"bus\":\"pci.2\",\"addr\":\"0x0\"}" -add-fd set=0,fd=27,opaque=serial0-log -chardev pty,id=charserial0,logfile=/dev/fdset/0,logappend=on -device "{\"driver\":\"isa-serial\",\"chardev\":\"charserial0\",\"id\":\"serial0\",\"index\":0}" -device "{\"driver\":\"usb-tablet\",\"id\":\"input0\",\"bus\":\"usb.0\",\"port\":\"1\"}" -audiodev "{\"id\":\"audio1\",\"driver\":\"none\"}" -object "{\"qom-type\":\"tls-creds-x509\",\"id\":\"vnc-tls-creds0\",\"dir\":\"/etc/pki/qemu\",\"endpoint\":\"server\",\"verify-peer\":true}" -vnc "[::0]:0,tls-creds=vnc-tls-creds0,audiodev=audio1" -device "{\"driver\":\"virtio-vga\",\"id\":\"video0\",\"max_outputs\":1,\"bus\":\"pcie.0\",\"addr\":\"0x1\"}" -global ICH9-LPC.noreboot=off -watchdog-action reset -device "{\"driver\":\"virtio-balloon-pci\",\"id\":\"balloon0\",\"bus\":\"pci.4\",\"addr\":\"0x0\"}" -object "{\"qom-type\":\"rng-random\",\"id\":\"objrng0\",\"filename\":\"/dev/urandom\"}" -device "{\"driver\":\"virtio-rng-pci\",\"rng\":\"objrng0\",\"id\":\"rng0\",\"bus\":\"pci.5\",\"addr\":\"0x0\"}" -device 
"{\"driver\":\"vmcoreinfo\"}" -sandbox on,obsolete=deny,elevateprivileges=deny,spawn=deny,resourcecontrol=deny -msg timestamp=on

Jan 22 14:34:15 compute-2 systemd[1]: Started Virtual Machine qemu-6-instance-00000016.

● session-1.scope - Session 1 of User zuul
     Loaded: loaded (/run/systemd/transient/session-1.scope; transient)
  Transient: yes
     Active: active (abandoned) since Thu 2026-01-22 12:50:30 UTC; 2h 55min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 17.1M (peak: 39.3M)
        CPU: 1min 6.832s
     CGroup: /user.slice/user-1000.slice/session-1.scope
             └─4517 /usr/bin/python3

Jan 22 12:51:16 np0005592159.novalocal sudo[6864]: pam_unix(sudo:session): session closed for user root
Jan 22 12:51:16 np0005592159.novalocal python3[6893]: ansible-ansible.legacy.command Invoked with executable=/bin/bash _raw_params=env
                                                       _uses_shell=True zuul_log_id=fa163efc-24cc-37d2-1cc7-000000000020-1-compute2 zuul_ansible_split_streams=False warn=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None creates=None removes=None stdin=None
Jan 22 12:51:18 np0005592159.novalocal python3[6922]: ansible-file Invoked with path=/home/zuul/workspace state=directory recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None mode=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Jan 22 12:51:42 np0005592159.novalocal sudo[6950]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-ihqogofdshkmxdtaoldtfamwjkaasacv ; /usr/bin/python3'
Jan 22 12:51:42 np0005592159.novalocal sudo[6950]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Jan 22 12:51:43 np0005592159.novalocal python3[6952]: ansible-ansible.builtin.file Invoked with path=/etc/ci/env state=directory mode=0755 recurse=False force=False follow=True modification_time_format=%Y%m%d%H%M.%S access_time_format=%Y%m%d%H%M.%S unsafe_writes=False _original_basename=None _diff_peek=None src=None modification_time=None access_time=None owner=None group=None seuser=None serole=None selevel=None setype=None attributes=None
Jan 22 12:51:43 np0005592159.novalocal sudo[6950]: pam_unix(sudo:session): session closed for user root
Jan 22 12:52:43 np0005592159.novalocal sshd-session[4314]: Received disconnect from 38.102.83.114 port 53856:11: disconnected by user
Jan 22 12:52:43 np0005592159.novalocal sshd-session[4314]: Disconnected from user zuul 38.102.83.114 port 53856
Jan 22 12:52:43 np0005592159.novalocal sshd-session[4301]: pam_unix(sshd:session): session closed for user zuul

● session-20.scope - Session 20 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-20.scope; transient)
  Transient: yes
     Active: active (running) since Thu 2026-01-22 13:33:33 UTC; 2h 12min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 1.6M)
        CPU: 249ms
     CGroup: /user.slice/user-42477.slice/session-20.scope
             ├─72606 "sshd-session: ceph-admin [priv]"
             └─72628 "sshd-session: ceph-admin"

Jan 22 13:33:33 compute-2 systemd[1]: Started Session 20 of User ceph-admin.

● session-22.scope - Session 22 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-22.scope; transient)
  Transient: yes
     Active: active (running) since Thu 2026-01-22 13:33:33 UTC; 2h 12min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.5M)
        CPU: 323ms
     CGroup: /user.slice/user-42477.slice/session-22.scope
             ├─72623 "sshd-session: ceph-admin [priv]"
             └─72629 "sshd-session: ceph-admin@notty"

Jan 22 13:33:33 compute-2 systemd[1]: Started Session 22 of User ceph-admin.
Jan 22 13:33:33 compute-2 sudo[72630]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 22 13:33:33 compute-2 sudo[72630]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 22 13:33:33 compute-2 sudo[72630]: pam_unix(sudo:session): session closed for user root
Jan 22 13:33:33 compute-2 sudo[72655]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Jan 22 13:33:33 compute-2 sudo[72655]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 22 13:33:33 compute-2 sudo[72655]: pam_unix(sudo:session): session closed for user root

● session-23.scope - Session 23 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-23.scope; transient)
  Transient: yes
     Active: active (running) since Thu 2026-01-22 13:33:33 UTC; 2h 12min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 6.2M)
        CPU: 376ms
     CGroup: /user.slice/user-42477.slice/session-23.scope
             ├─72680 "sshd-session: ceph-admin [priv]"
             └─72683 "sshd-session: ceph-admin@notty"

Jan 22 13:33:33 compute-2 systemd[1]: Started Session 23 of User ceph-admin.
Jan 22 13:33:33 compute-2 sudo[72684]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 22 13:33:33 compute-2 sudo[72684]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 22 13:33:33 compute-2 sudo[72684]: pam_unix(sudo:session): session closed for user root
Jan 22 13:33:33 compute-2 sudo[72709]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/088fe176-0106-5401-803c-2da38b73b76a/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 check-host --expect-hostname compute-2
Jan 22 13:33:33 compute-2 sudo[72709]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 22 13:33:33 compute-2 sudo[72709]: pam_unix(sudo:session): session closed for user root

● session-24.scope - Session 24 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-24.scope; transient)
  Transient: yes
     Active: active (running) since Thu 2026-01-22 13:33:34 UTC; 2h 12min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.3M)
        CPU: 322ms
     CGroup: /user.slice/user-42477.slice/session-24.scope
             ├─72734 "sshd-session: ceph-admin [priv]"
             └─72737 "sshd-session: ceph-admin@notty"

Jan 22 13:33:34 compute-2 systemd[1]: Started Session 24 of User ceph-admin.
Jan 22 13:33:34 compute-2 sudo[72738]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 22 13:33:34 compute-2 sudo[72738]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 22 13:33:34 compute-2 sudo[72738]: pam_unix(sudo:session): session closed for user root
Jan 22 13:33:34 compute-2 sudo[72763]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /var/lib/ceph/088fe176-0106-5401-803c-2da38b73b76a/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d
Jan 22 13:33:34 compute-2 sudo[72763]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 22 13:33:34 compute-2 sudo[72763]: pam_unix(sudo:session): session closed for user root

● session-25.scope - Session 25 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-25.scope; transient)
  Transient: yes
     Active: active (running) since Thu 2026-01-22 13:33:34 UTC; 2h 12min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.5M)
        CPU: 373ms
     CGroup: /user.slice/user-42477.slice/session-25.scope
             ├─72788 "sshd-session: ceph-admin [priv]"
             └─72791 "sshd-session: ceph-admin@notty"

Jan 22 13:33:34 compute-2 systemd[1]: Started Session 25 of User ceph-admin.
Jan 22 13:33:34 compute-2 sudo[72792]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 22 13:33:34 compute-2 sudo[72792]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 22 13:33:34 compute-2 sudo[72792]: pam_unix(sudo:session): session closed for user root
Jan 22 13:33:34 compute-2 sudo[72817]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /var/lib/ceph/088fe176-0106-5401-803c-2da38b73b76a
Jan 22 13:33:34 compute-2 sudo[72817]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 22 13:33:34 compute-2 sudo[72817]: pam_unix(sudo:session): session closed for user root

● session-26.scope - Session 26 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-26.scope; transient)
  Transient: yes
     Active: active (running) since Thu 2026-01-22 13:33:34 UTC; 2h 12min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.2M)
        CPU: 337ms
     CGroup: /user.slice/user-42477.slice/session-26.scope
             ├─72842 "sshd-session: ceph-admin [priv]"
             └─72845 "sshd-session: ceph-admin@notty"

Jan 22 13:33:34 compute-2 systemd[1]: Started Session 26 of User ceph-admin.
Jan 22 13:33:34 compute-2 sudo[72846]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 22 13:33:34 compute-2 sudo[72846]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 22 13:33:34 compute-2 sudo[72846]: pam_unix(sudo:session): session closed for user root
Jan 22 13:33:34 compute-2 sudo[72871]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /tmp/cephadm-088fe176-0106-5401-803c-2da38b73b76a/var/lib/ceph/088fe176-0106-5401-803c-2da38b73b76a
Jan 22 13:33:34 compute-2 sudo[72871]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 22 13:33:34 compute-2 sudo[72871]: pam_unix(sudo:session): session closed for user root

● session-27.scope - Session 27 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-27.scope; transient)
  Transient: yes
     Active: active (running) since Thu 2026-01-22 13:33:35 UTC; 2h 12min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.4M)
        CPU: 345ms
     CGroup: /user.slice/user-42477.slice/session-27.scope
             ├─72896 "sshd-session: ceph-admin [priv]"
             └─72899 "sshd-session: ceph-admin@notty"

Jan 22 13:33:35 compute-2 systemd[1]: Started Session 27 of User ceph-admin.
Jan 22 13:33:35 compute-2 sudo[72900]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 22 13:33:35 compute-2 sudo[72900]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 22 13:33:35 compute-2 sudo[72900]: pam_unix(sudo:session): session closed for user root
Jan 22 13:33:35 compute-2 sudo[72925]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/touch /tmp/cephadm-088fe176-0106-5401-803c-2da38b73b76a/var/lib/ceph/088fe176-0106-5401-803c-2da38b73b76a/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new
Jan 22 13:33:35 compute-2 sudo[72925]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 22 13:33:35 compute-2 sudo[72925]: pam_unix(sudo:session): session closed for user root

● session-28.scope - Session 28 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-28.scope; transient)
  Transient: yes
     Active: active (running) since Thu 2026-01-22 13:33:35 UTC; 2h 12min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.5M)
        CPU: 331ms
     CGroup: /user.slice/user-42477.slice/session-28.scope
             ├─72950 "sshd-session: ceph-admin [priv]"
             └─72953 "sshd-session: ceph-admin@notty"

Jan 22 13:33:35 compute-2 systemd[1]: Started Session 28 of User ceph-admin.
Jan 22 13:33:35 compute-2 sudo[72954]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 22 13:33:35 compute-2 sudo[72954]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 22 13:33:35 compute-2 sudo[72954]: pam_unix(sudo:session): session closed for user root
Jan 22 13:33:35 compute-2 sudo[72979]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chown -R ceph-admin /tmp/cephadm-088fe176-0106-5401-803c-2da38b73b76a
Jan 22 13:33:35 compute-2 sudo[72979]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 22 13:33:35 compute-2 sudo[72979]: pam_unix(sudo:session): session closed for user root

● session-29.scope - Session 29 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-29.scope; transient)
  Transient: yes
     Active: active (running) since Thu 2026-01-22 13:33:36 UTC; 2h 12min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.1M)
        CPU: 331ms
     CGroup: /user.slice/user-42477.slice/session-29.scope
             ├─73004 "sshd-session: ceph-admin [priv]"
             └─73007 "sshd-session: ceph-admin@notty"

Jan 22 13:33:36 compute-2 systemd[1]: Started Session 29 of User ceph-admin.
Jan 22 13:33:36 compute-2 sudo[73008]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 22 13:33:36 compute-2 sudo[73008]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 22 13:33:36 compute-2 sudo[73008]: pam_unix(sudo:session): session closed for user root
Jan 22 13:33:36 compute-2 sudo[73033]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chmod 644 /tmp/cephadm-088fe176-0106-5401-803c-2da38b73b76a/var/lib/ceph/088fe176-0106-5401-803c-2da38b73b76a/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new
Jan 22 13:33:36 compute-2 sudo[73033]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 22 13:33:36 compute-2 sudo[73033]: pam_unix(sudo:session): session closed for user root

● session-30.scope - Session 30 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-30.scope; transient)
  Transient: yes
     Active: active (running) since Thu 2026-01-22 13:33:36 UTC; 2h 12min ago
         IO: 0B read, 216.0K written
      Tasks: 2
     Memory: 1.5M (peak: 3.6M)
        CPU: 291ms
     CGroup: /user.slice/user-42477.slice/session-30.scope
             ├─73058 "sshd-session: ceph-admin [priv]"
             └─73061 "sshd-session: ceph-admin@notty"

Jan 22 13:33:36 compute-2 systemd[1]: Started Session 30 of User ceph-admin.

● session-31.scope - Session 31 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-31.scope; transient)
  Transient: yes
     Active: active (running) since Thu 2026-01-22 13:33:37 UTC; 2h 12min ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.2M (peak: 4.0M)
        CPU: 396ms
     CGroup: /user.slice/user-42477.slice/session-31.scope
             ├─73085 "sshd-session: ceph-admin [priv]"
             └─73088 "sshd-session: ceph-admin@notty"

Jan 22 13:33:37 compute-2 systemd[1]: Started Session 31 of User ceph-admin.
Jan 22 13:33:37 compute-2 sudo[73089]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 22 13:33:37 compute-2 sudo[73089]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 22 13:33:37 compute-2 sudo[73089]: pam_unix(sudo:session): session closed for user root
Jan 22 13:33:37 compute-2 sudo[73114]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mv /tmp/cephadm-088fe176-0106-5401-803c-2da38b73b76a/var/lib/ceph/088fe176-0106-5401-803c-2da38b73b76a/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new /var/lib/ceph/088fe176-0106-5401-803c-2da38b73b76a/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d
Jan 22 13:33:37 compute-2 sudo[73114]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 22 13:33:37 compute-2 sudo[73114]: pam_unix(sudo:session): session closed for user root

● session-32.scope - Session 32 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-32.scope; transient)
  Transient: yes
     Active: active (running) since Thu 2026-01-22 13:33:37 UTC; 2h 12min ago
         IO: 296.0K read, 1.9G written
      Tasks: 2
     Memory: 1.4G (peak: 1.7G)
        CPU: 4min 59.043s
     CGroup: /user.slice/user-42477.slice/session-32.scope
             ├─73139 "sshd-session: ceph-admin [priv]"
             └─73142 "sshd-session: ceph-admin@notty"

Jan 22 15:45:39 compute-2 sudo[292367]: pam_unix(sudo:session): session closed for user root
Jan 22 15:45:39 compute-2 sudo[292414]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 22 15:45:39 compute-2 sudo[292414]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 22 15:45:39 compute-2 sudo[292414]: pam_unix(sudo:session): session closed for user root
Jan 22 15:45:59 compute-2 sudo[295523]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 22 15:45:59 compute-2 sudo[295523]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 22 15:45:59 compute-2 sudo[295523]: pam_unix(sudo:session): session closed for user root
Jan 22 15:45:59 compute-2 sudo[295565]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 22 15:45:59 compute-2 sudo[295565]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 22 15:45:59 compute-2 sudo[295565]: pam_unix(sudo:session): session closed for user root

● session-51.scope - Session 51 of User zuul
     Loaded: loaded (/run/systemd/transient/session-51.scope; transient)
  Transient: yes
     Active: active (running) since Thu 2026-01-22 15:45:29 UTC; 43s ago
         IO: 394.6M read, 177.9M written
      Tasks: 27
     Memory: 921.6M (peak: 980.6M)
        CPU: 1min 57.693s
     CGroup: /user.slice/user-1000.slice/session-51.scope
             ├─291623 "sshd-session: zuul [priv]"
             ├─291627 "sshd-session: zuul@notty"
             ├─291628 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             ├─291652 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             ├─297596 timeout 15s turbostat --debug sleep 10
             ├─298091 timeout 300s systemctl status --all
             ├─298093 systemctl status --all
             ├─298119 timeout 300s ceph osd pool ls detail --format json-pretty
             ├─298120 /usr/bin/python3 -s /usr/bin/ceph osd pool ls detail --format json-pretty
             ├─298121 timeout 300s tuned-adm active
             └─298122 /usr/bin/python3 -Es /usr/sbin/tuned-adm active

Jan 22 15:45:29 compute-2 systemd[1]: Started Session 51 of User zuul.
Jan 22 15:45:29 compute-2 sudo[291628]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Jan 22 15:45:29 compute-2 sudo[291628]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Jan 22 15:46:01 compute-2 ovs-appctl[296364]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Jan 22 15:46:01 compute-2 ovs-appctl[296374]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Jan 22 15:46:01 compute-2 ovs-appctl[296378]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

○ 65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d-4facdc8abbad11c7.service - /usr/bin/podman healthcheck run 65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d
     Loaded: loaded (/run/systemd/transient/65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d-4facdc8abbad11c7.service; transient)
  Transient: yes
     Active: inactive (dead) since Thu 2026-01-22 15:46:08 UTC; 4s ago
   Duration: 119ms
TriggeredBy: ● 65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d-4facdc8abbad11c7.timer
    Process: 297782 ExecStart=/usr/bin/podman healthcheck run 65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d (code=exited, status=0/SUCCESS)
   Main PID: 297782 (code=exited, status=0/SUCCESS)
        CPU: 140ms

Jan 22 15:46:08 compute-2 podman[297782]: 2026-01-22 15:46:08.037972221 +0000 UTC m=+0.086500367 container health_status 65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '109b2e65a809d9df2b2d81c602046702b988fc7a594c944e65d89c0e3a64ae71-32177f2c3fa09030b0d1ae5cc46811ab0cd45ff7cf090b1a287b538f8d13e58d-32177f2c3fa09030b0d1ae5cc46811ab0cd45ff7cf090b1a287b538f8d13e58d-32177f2c3fa09030b0d1ae5cc46811ab0cd45ff7cf090b1a287b538f8d13e58d-0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/openstack/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, org.label-schema.build-date=20251202, org.label-schema.vendor=CentOS, config_id=ovn_metadata_agent, managed_by=edpm_ansible, tcib_managed=true, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2)
Unit apparmor.service could not be found.
Unit apt-daily.service could not be found.

○ 8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356-44e4d69ad703dadb.service - /usr/bin/podman healthcheck run 8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356
     Loaded: loaded (/run/systemd/transient/8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356-44e4d69ad703dadb.service; transient)
  Transient: yes
     Active: inactive (dead) since Thu 2026-01-22 15:45:57 UTC; 15s ago
   Duration: 228ms
TriggeredBy: ● 8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356-44e4d69ad703dadb.timer
    Process: 295071 ExecStart=/usr/bin/podman healthcheck run 8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356 (code=exited, status=0/SUCCESS)
   Main PID: 295071 (code=exited, status=0/SUCCESS)
        CPU: 166ms

Jan 22 15:45:57 compute-2 podman[295071]: 2026-01-22 15:45:57.148557003 +0000 UTC m=+0.188369860 container health_status 8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20251202, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '109b2e65a809d9df2b2d81c602046702b988fc7a594c944e65d89c0e3a64ae71-32177f2c3fa09030b0d1ae5cc46811ab0cd45ff7cf090b1a287b538f8d13e58d-32177f2c3fa09030b0d1ae5cc46811ab0cd45ff7cf090b1a287b538f8d13e58d-32177f2c3fa09030b0d1ae5cc46811ab0cd45ff7cf090b1a287b538f8d13e58d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=ovn_controller, container_name=ovn_controller, 
io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)

● auditd.service - Security Auditing Service
     Loaded: loaded (/usr/lib/systemd/system/auditd.service; enabled; preset: enabled)
     Active: active (running) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:auditd(8)
             https://github.com/linux-audit/audit-documentation
   Main PID: 699 (auditd)
         IO: 0B read, 28.2M written
      Tasks: 4 (limit: 48560)
     Memory: 18.3M (peak: 19.2M)
        CPU: 6.565s
     CGroup: /system.slice/auditd.service
             ├─699 /sbin/auditd
             └─701 /usr/sbin/sedispatch

Jan 22 12:49:14 localhost augenrules[719]: failure 1
Jan 22 12:49:14 localhost augenrules[719]: pid 699
Jan 22 12:49:14 localhost augenrules[719]: rate_limit 0
Unit auto-cpufreq.service could not be found.
Unit autofs.service could not be found.
Jan 22 12:49:14 localhost augenrules[719]: backlog_limit 8192
Jan 22 12:49:14 localhost augenrules[719]: lost 0
Jan 22 12:49:14 localhost augenrules[719]: backlog 2
Jan 22 12:49:14 localhost augenrules[719]: backlog_wait_time 60000
Jan 22 12:49:14 localhost augenrules[719]: backlog_wait_time_actual 0
Jan 22 12:49:14 localhost systemd[1]: Started Security Auditing Service.
Jan 22 13:51:10 compute-2 auditd[699]: Audit daemon rotating log files

○ auth-rpcgss-module.service - Kernel Module supporting RPCSEC_GSS
     Loaded: loaded (/usr/lib/systemd/system/auth-rpcgss-module.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Thu 2026-01-22 12:49:14 UTC; 2h 56min ago

○ blk-availability.service - Availability of block devices
     Loaded: loaded (/usr/lib/systemd/system/blk-availability.service; disabled; preset: disabled)
     Active: inactive (dead)

● ceph-088fe176-0106-5401-803c-2da38b73b76a@crash.compute-2.service - Ceph crash.compute-2 for 088fe176-0106-5401-803c-2da38b73b76a
     Loaded: loaded (/etc/systemd/system/ceph-088fe176-0106-5401-803c-2da38b73b76a@.service; enabled; preset: disabled)
     Active: active (running) since Thu 2026-01-22 13:35:52 UTC; 2h 10min ago
   Main PID: 77792 (conmon)
         IO: 0B read, 1.1M written
      Tasks: 3 (limit: 48560)
     Memory: 12.2M (peak: 49.4M)
        CPU: 873ms
     CGroup: /system.slice/system-ceph\x2d088fe176\x2d0106\x2d5401\x2d803c\x2d2da38b73b76a.slice/ceph-088fe176-0106-5401-803c-2da38b73b76a@crash.compute-2.service
             ├─libpod-payload-52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93
             │ ├─77794 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-2
             │ └─77796 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-2
             └─runtime
               └─77792 /usr/bin/conmon --api-version 1 -c 52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93 -u 52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93/userdata -p /run/containers/storage/overlay-containers/52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-crash-compute-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@crash.compute-2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93

Jan 22 13:35:51 compute-2 ceph-088fe176-0106-5401-803c-2da38b73b76a-crash-compute-2[77792]: 2026-01-22T13:35:51.831+0000 7fd4e3898640 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory
Jan 22 13:35:51 compute-2 ceph-088fe176-0106-5401-803c-2da38b73b76a-crash-compute-2[77792]: 2026-01-22T13:35:51.831+0000 7fd4e3898640 -1 AuthRegistry(0x7fd4e3897000) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx
Jan 22 13:35:51 compute-2 ceph-088fe176-0106-5401-803c-2da38b73b76a-crash-compute-2[77792]: 2026-01-22T13:35:51.833+0000 7fd4e0e0c640 -1 monclient(hunting): handle_auth_bad_method server allowed_methods [2] but i only support [1]
Jan 22 13:35:51 compute-2 ceph-088fe176-0106-5401-803c-2da38b73b76a-crash-compute-2[77792]: 2026-01-22T13:35:51.834+0000 7fd4e160d640 -1 monclient(hunting): handle_auth_bad_method server allowed_methods [2] but i only support [1]
Jan 22 13:35:51 compute-2 ceph-088fe176-0106-5401-803c-2da38b73b76a-crash-compute-2[77792]: 2026-01-22T13:35:51.834+0000 7fd4e1e0e640 -1 monclient(hunting): handle_auth_bad_method server allowed_methods [2] but i only support [1]
Jan 22 13:35:51 compute-2 ceph-088fe176-0106-5401-803c-2da38b73b76a-crash-compute-2[77792]: 2026-01-22T13:35:51.834+0000 7fd4e3898640 -1 monclient: authenticate NOTE: no keyring found; disabled cephx authentication
Jan 22 13:35:51 compute-2 ceph-088fe176-0106-5401-803c-2da38b73b76a-crash-compute-2[77792]: [errno 13] RADOS permission denied (error connecting to the cluster)
Jan 22 13:35:51 compute-2 ceph-088fe176-0106-5401-803c-2da38b73b76a-crash-compute-2[77792]: INFO:ceph-crash:monitoring path /var/lib/ceph/crash, delay 600s
Jan 22 13:35:52 compute-2 bash[77776]: 52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93
Jan 22 13:35:52 compute-2 systemd[1]: Started Ceph crash.compute-2 for 088fe176-0106-5401-803c-2da38b73b76a.

● ceph-088fe176-0106-5401-803c-2da38b73b76a@haproxy.rgw.default.compute-2.zogxki.service - Ceph haproxy.rgw.default.compute-2.zogxki for 088fe176-0106-5401-803c-2da38b73b76a
     Loaded: loaded (/etc/systemd/system/ceph-088fe176-0106-5401-803c-2da38b73b76a@.service; enabled; preset: disabled)
     Active: active (running) since Thu 2026-01-22 13:37:29 UTC; 2h 8min ago
   Main PID: 82538 (conmon)
         IO: 0B read, 862.5K written
      Tasks: 11 (limit: 48560)
     Memory: 6.0M (peak: 19.9M)
        CPU: 12.040s
     CGroup: /system.slice/system-ceph\x2d088fe176\x2d0106\x2d5401\x2d803c\x2d2da38b73b76a.slice/ceph-088fe176-0106-5401-803c-2da38b73b76a@haproxy.rgw.default.compute-2.zogxki.service
             ├─libpod-payload-ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f
             │ ├─82540 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ ├─82542 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ └─82544 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             └─runtime
               └─82538 /usr/bin/conmon --api-version 1 -c ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f -u ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f/userdata -p /run/containers/storage/overlay-containers/ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-haproxy-rgw-default-compute-2-zogxki --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@haproxy.rgw.default.compute-2.zogxki.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f

Jan 22 13:37:28 compute-2 systemd[1]: Starting Ceph haproxy.rgw.default.compute-2.zogxki for 088fe176-0106-5401-803c-2da38b73b76a...
Jan 22 13:37:28 compute-2 podman[82513]: 2026-01-22 13:37:28.303426176 +0000 UTC m=+0.021754869 image pull e85424b0d443f37ddd2dd8a3bb2ef6f18dd352b987723a921b64289023af2914 quay.io/ceph/haproxy:2.3
Jan 22 13:37:28 compute-2 podman[82513]: 2026-01-22 13:37:28.507057928 +0000 UTC m=+0.225386601 container create ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f (image=quay.io/ceph/haproxy:2.3, name=ceph-088fe176-0106-5401-803c-2da38b73b76a-haproxy-rgw-default-compute-2-zogxki)
Jan 22 13:37:29 compute-2 podman[82513]: 2026-01-22 13:37:29.014283463 +0000 UTC m=+0.732612166 container init ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f (image=quay.io/ceph/haproxy:2.3, name=ceph-088fe176-0106-5401-803c-2da38b73b76a-haproxy-rgw-default-compute-2-zogxki)
Jan 22 13:37:29 compute-2 podman[82513]: 2026-01-22 13:37:29.020564172 +0000 UTC m=+0.738892845 container start ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f (image=quay.io/ceph/haproxy:2.3, name=ceph-088fe176-0106-5401-803c-2da38b73b76a-haproxy-rgw-default-compute-2-zogxki)
Jan 22 13:37:29 compute-2 bash[82513]: ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f
Jan 22 13:37:29 compute-2 systemd[1]: Started Ceph haproxy.rgw.default.compute-2.zogxki for 088fe176-0106-5401-803c-2da38b73b76a.
Jan 22 13:37:29 compute-2 ceph-088fe176-0106-5401-803c-2da38b73b76a-haproxy-rgw-default-compute-2-zogxki[82538]: [NOTICE] 021/133729 (2) : New worker #1 (4) forked

● ceph-088fe176-0106-5401-803c-2da38b73b76a@keepalived.rgw.default.compute-2.xbsrtt.service - Ceph keepalived.rgw.default.compute-2.xbsrtt for 088fe176-0106-5401-803c-2da38b73b76a
     Loaded: loaded (/etc/systemd/system/ceph-088fe176-0106-5401-803c-2da38b73b76a@.service; enabled; preset: disabled)
     Active: active (running) since Thu 2026-01-22 13:37:44 UTC; 2h 8min ago
   Main PID: 82992 (conmon)
         IO: 0B read, 171.5K written
      Tasks: 4 (limit: 48560)
     Memory: 2.9M (peak: 19.3M)
        CPU: 36.747s
     CGroup: /system.slice/system-ceph\x2d088fe176\x2d0106\x2d5401\x2d803c\x2d2da38b73b76a.slice/ceph-088fe176-0106-5401-803c-2da38b73b76a@keepalived.rgw.default.compute-2.xbsrtt.service
             ├─libpod-payload-6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4
             │ ├─82994 /run/podman-init -- ./init.sh
             │ ├─82996 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ └─82998 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             └─runtime
               └─82992 /usr/bin/conmon --api-version 1 -c 6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4 -u 6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4/userdata -p /run/containers/storage/overlay-containers/6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-keepalived-rgw-default-compute-2-xbsrtt --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@keepalived.rgw.default.compute-2.xbsrtt.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4

Jan 22 13:37:44 compute-2 ceph-088fe176-0106-5401-803c-2da38b73b76a-keepalived-rgw-default-compute-2-xbsrtt[82992]: Thu Jan 22 13:37:44 2026: Command line: '/usr/sbin/keepalived' '-n' '-l' '-f' '/etc/keepalived/keepalived.conf'
Jan 22 13:37:44 compute-2 ceph-088fe176-0106-5401-803c-2da38b73b76a-keepalived-rgw-default-compute-2-xbsrtt[82992]: Thu Jan 22 13:37:44 2026: Configuration file /etc/keepalived/keepalived.conf
Jan 22 13:37:44 compute-2 ceph-088fe176-0106-5401-803c-2da38b73b76a-keepalived-rgw-default-compute-2-xbsrtt[82992]: Thu Jan 22 13:37:44 2026: NOTICE: setting config option max_auto_priority should result in better keepalived performance
Jan 22 13:37:44 compute-2 ceph-088fe176-0106-5401-803c-2da38b73b76a-keepalived-rgw-default-compute-2-xbsrtt[82992]: Thu Jan 22 13:37:44 2026: Starting VRRP child process, pid=4
Jan 22 13:37:44 compute-2 ceph-088fe176-0106-5401-803c-2da38b73b76a-keepalived-rgw-default-compute-2-xbsrtt[82992]: Thu Jan 22 13:37:44 2026: Startup complete
Jan 22 13:37:44 compute-2 ceph-088fe176-0106-5401-803c-2da38b73b76a-keepalived-rgw-default-compute-2-xbsrtt[82992]: Thu Jan 22 13:37:44 2026: (VI_0) Entering BACKUP STATE (init)
Jan 22 13:37:44 compute-2 ceph-088fe176-0106-5401-803c-2da38b73b76a-keepalived-rgw-default-compute-2-xbsrtt[82992]: Thu Jan 22 13:37:44 2026: VRRP_Script(check_backend) succeeded
Jan 22 13:37:47 compute-2 ceph-088fe176-0106-5401-803c-2da38b73b76a-keepalived-rgw-default-compute-2-xbsrtt[82992]: Thu Jan 22 13:37:47 2026: (VI_0) Entering MASTER STATE
Jan 22 13:37:47 compute-2 ceph-088fe176-0106-5401-803c-2da38b73b76a-keepalived-rgw-default-compute-2-xbsrtt[82992]: Thu Jan 22 13:37:47 2026: (VI_0) Master received advert from 192.168.122.100 with higher priority 100, ours 90
Jan 22 13:37:47 compute-2 ceph-088fe176-0106-5401-803c-2da38b73b76a-keepalived-rgw-default-compute-2-xbsrtt[82992]: Thu Jan 22 13:37:47 2026: (VI_0) Entering BACKUP STATE

● ceph-088fe176-0106-5401-803c-2da38b73b76a@mds.cephfs.compute-2.zycvef.service - Ceph mds.cephfs.compute-2.zycvef for 088fe176-0106-5401-803c-2da38b73b76a
     Loaded: loaded (/etc/systemd/system/ceph-088fe176-0106-5401-803c-2da38b73b76a@.service; enabled; preset: disabled)
     Active: active (running) since Thu 2026-01-22 13:36:49 UTC; 2h 9min ago
   Main PID: 81150 (conmon)
         IO: 0B read, 204.5K written
      Tasks: 28 (limit: 48560)
     Memory: 28.1M (peak: 28.9M)
        CPU: 13.643s
     CGroup: /system.slice/system-ceph\x2d088fe176\x2d0106\x2d5401\x2d803c\x2d2da38b73b76a.slice/ceph-088fe176-0106-5401-803c-2da38b73b76a@mds.cephfs.compute-2.zycvef.service
             ├─libpod-payload-28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63
             │ ├─81152 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-2.zycvef -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─81154 /usr/bin/ceph-mds -n mds.cephfs.compute-2.zycvef -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─81150 /usr/bin/conmon --api-version 1 -c 28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63 -u 28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63/userdata -p /run/containers/storage/overlay-containers/28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-mds-cephfs-compute-2-zycvef --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@mds.cephfs.compute-2.zycvef.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63

Jan 22 15:45:39 compute-2 ceph-mds[81154]: mds.cephfs.compute-2.zycvef asok_command: dump loads {prefix=dump loads} (starting...)
Jan 22 15:45:39 compute-2 ceph-mds[81154]: mds.cephfs.compute-2.zycvef asok_command: dump tree {prefix=dump tree,root=/} (starting...)
Jan 22 15:45:39 compute-2 ceph-mds[81154]: mds.cephfs.compute-2.zycvef asok_command: dump_blocked_ops {prefix=dump_blocked_ops} (starting...)
Jan 22 15:45:39 compute-2 ceph-mds[81154]: mds.cephfs.compute-2.zycvef asok_command: dump_historic_ops {prefix=dump_historic_ops} (starting...)
Jan 22 15:45:39 compute-2 ceph-mds[81154]: mds.cephfs.compute-2.zycvef asok_command: dump_historic_ops_by_duration {prefix=dump_historic_ops_by_duration} (starting...)
Jan 22 15:45:40 compute-2 ceph-mds[81154]: mds.cephfs.compute-2.zycvef asok_command: dump_ops_in_flight {prefix=dump_ops_in_flight} (starting...)
Jan 22 15:45:40 compute-2 ceph-mds[81154]: mds.cephfs.compute-2.zycvef asok_command: get subtrees {prefix=get subtrees} (starting...)
Jan 22 15:45:40 compute-2 ceph-mds[81154]: mds.cephfs.compute-2.zycvef asok_command: ops {prefix=ops} (starting...)
Jan 22 15:45:41 compute-2 ceph-mds[81154]: mds.cephfs.compute-2.zycvef asok_command: session ls {prefix=session ls} (starting...)
Jan 22 15:45:41 compute-2 ceph-mds[81154]: mds.cephfs.compute-2.zycvef asok_command: status {prefix=status} (starting...)

● ceph-088fe176-0106-5401-803c-2da38b73b76a@mgr.compute-2.tjdsdx.service - Ceph mgr.compute-2.tjdsdx for 088fe176-0106-5401-803c-2da38b73b76a
     Loaded: loaded (/etc/systemd/system/ceph-088fe176-0106-5401-803c-2da38b73b76a@.service; enabled; preset: disabled)
     Active: active (running) since Thu 2026-01-22 13:35:43 UTC; 2h 10min ago
   Main PID: 77434 (conmon)
         IO: 0B read, 225.0K written
      Tasks: 23 (limit: 48560)
     Memory: 479.6M (peak: 479.8M)
        CPU: 31.109s
     CGroup: /system.slice/system-ceph\x2d088fe176\x2d0106\x2d5401\x2d803c\x2d2da38b73b76a.slice/ceph-088fe176-0106-5401-803c-2da38b73b76a@mgr.compute-2.tjdsdx.service
             ├─libpod-payload-3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f
             │ ├─77436 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-2.tjdsdx -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─77438 /usr/bin/ceph-mgr -n mgr.compute-2.tjdsdx -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─77434 /usr/bin/conmon --api-version 1 -c 3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f -u 3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f/userdata -p /run/containers/storage/overlay-containers/3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-mgr-compute-2-tjdsdx --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@mgr.compute-2.tjdsdx.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f

Jan 22 13:36:04 compute-2 ceph-mgr[77438]: client.0 ms_handle_reset on v2:192.168.122.100:6800/1334415348
Jan 22 13:36:05 compute-2 ceph-mgr[77438]: client.0 ms_handle_reset on v2:192.168.122.100:6800/1334415348
Jan 22 13:36:06 compute-2 ceph-mgr[77438]: client.0 ms_handle_reset on v2:192.168.122.100:6800/1334415348
Jan 22 13:51:07 compute-2 ceph-mgr[77438]: client.0 ms_handle_reset on v2:192.168.122.100:6800/1334415348
Jan 22 14:21:07 compute-2 ceph-mgr[77438]: client.0 ms_handle_reset on v2:192.168.122.100:6800/1334415348
Jan 22 14:36:07 compute-2 ceph-mgr[77438]: client.0 ms_handle_reset on v2:192.168.122.100:6800/1334415348
Jan 22 14:51:07 compute-2 ceph-mgr[77438]: client.0 ms_handle_reset on v2:192.168.122.100:6800/1334415348
Jan 22 15:06:07 compute-2 ceph-mgr[77438]: client.0 ms_handle_reset on v2:192.168.122.100:6800/1334415348
Jan 22 15:21:07 compute-2 ceph-mgr[77438]: client.0 ms_handle_reset on v2:192.168.122.100:6800/1334415348
Jan 22 15:36:07 compute-2 ceph-mgr[77438]: client.0 ms_handle_reset on v2:192.168.122.100:6800/1334415348

● ceph-088fe176-0106-5401-803c-2da38b73b76a@mon.compute-2.service - Ceph mon.compute-2 for 088fe176-0106-5401-803c-2da38b73b76a
     Loaded: loaded (/etc/systemd/system/ceph-088fe176-0106-5401-803c-2da38b73b76a@.service; enabled; preset: disabled)
     Active: active (running) since Thu 2026-01-22 13:35:28 UTC; 2h 10min ago
   Main PID: 77077 (conmon)
         IO: 0B read, 1.3G written
      Tasks: 27 (limit: 48560)
     Memory: 173.8M (peak: 177.1M)
        CPU: 1min 46.741s
     CGroup: /system.slice/system-ceph\x2d088fe176\x2d0106\x2d5401\x2d803c\x2d2da38b73b76a.slice/ceph-088fe176-0106-5401-803c-2da38b73b76a@mon.compute-2.service
             ├─libpod-payload-ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6
             │ ├─77079 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─77081 /usr/bin/ceph-mon -n mon.compute-2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             └─runtime
               └─77077 /usr/bin/conmon --api-version 1 -c ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6 -u ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6/userdata -p /run/containers/storage/overlay-containers/ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-mon-compute-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@mon.compute-2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6

Jan 22 15:46:12 compute-2 ceph-mon[77081]: from='client.? 192.168.122.100:0/2788111332' entity='client.admin' cmd=[{"prefix": "osd pool ls", "detail": "detail", "format": "json-pretty"}]: dispatch
Jan 22 15:46:12 compute-2 ceph-mon[77081]: from='client.29578 -' entity='client.admin' cmd=[{"prefix": "osd df", "output_method": "tree", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Jan 22 15:46:12 compute-2 ceph-mon[77081]: from='client.? 192.168.122.100:0/2621515414' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json-pretty"}]: dispatch
Jan 22 15:46:12 compute-2 ceph-mon[77081]: pgmap v4175: 305 pgs: 2 active+clean+laggy, 303 active+clean; 882 MiB data, 652 MiB used, 20 GiB / 21 GiB avail
Jan 22 15:46:12 compute-2 ceph-mon[77081]: from='client.29584 -' entity='client.admin' cmd=[{"prefix": "osd df", "target": ["mon-mgr", ""], "format": "json-pretty"}]: dispatch
Jan 22 15:46:12 compute-2 ceph-mon[77081]: from='client.? 192.168.122.101:0/683533908' entity='client.admin' cmd=[{"prefix": "time-sync-status", "format": "json-pretty"}]: dispatch
Jan 22 15:46:12 compute-2 ceph-mon[77081]: from='client.? 192.168.122.102:0/3630248537' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json-pretty"}]: dispatch
Jan 22 15:46:12 compute-2 ceph-mon[77081]: 62 slow requests (by type [ 'delayed' : 62 ] most affected pool [ 'vms' : 37 ])
Jan 22 15:46:12 compute-2 ceph-mon[77081]: Health check update: 62 slow ops, oldest one blocked for 7763 sec, osd.2 has slow ops (SLOW_OPS)
Jan 22 15:46:12 compute-2 ceph-mon[77081]: from='client.? 192.168.122.102:0/1564577597' entity='client.admin' cmd=[{"prefix": "osd numa-status", "format": "json-pretty"}]: dispatch

● ceph-088fe176-0106-5401-803c-2da38b73b76a@osd.2.service - Ceph osd.2 for 088fe176-0106-5401-803c-2da38b73b76a
     Loaded: loaded (/etc/systemd/system/ceph-088fe176-0106-5401-803c-2da38b73b76a@.service; enabled; preset: disabled)
     Active: active (running) since Thu 2026-01-22 13:36:29 UTC; 2h 9min ago
   Main PID: 79775 (conmon)
         IO: 112.4M read, 2.2G written
      Tasks: 60 (limit: 48560)
     Memory: 667.5M (peak: 769.6M)
        CPU: 1min 19.509s
     CGroup: /system.slice/system-ceph\x2d088fe176\x2d0106\x2d5401\x2d803c\x2d2da38b73b76a.slice/ceph-088fe176-0106-5401-803c-2da38b73b76a@osd.2.service
             ├─libpod-payload-1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d
             │ ├─79777 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─79779 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─79775 /usr/bin/conmon --api-version 1 -c 1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d -u 1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d/userdata -p /run/containers/storage/overlay-containers/1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d

Jan 22 15:46:08 compute-2 ceph-osd[79779]: log_channel(cluster) log [WRN] : 62 slow requests (by type [ 'delayed' : 62 ] most affected pool [ 'vms' : 37 ])
Jan 22 15:46:09 compute-2 ceph-088fe176-0106-5401-803c-2da38b73b76a-osd-2[79775]: 2026-01-22T15:46:09.815+0000 7f47f8ed4640 -1 osd.2 183 get_health_metrics reporting 62 slow ops, oldest is osd_op(client.14140.0:10 2.12 2:4e99cc3e:::rbd_mirror_snapshot_schedule:head [omap-get-vals in=16b] snapc 0=[] ondisk+read+known_if_redirected+supports_pool_eio e50)
Jan 22 15:46:09 compute-2 ceph-osd[79779]: osd.2 183 get_health_metrics reporting 62 slow ops, oldest is osd_op(client.14140.0:10 2.12 2:4e99cc3e:::rbd_mirror_snapshot_schedule:head [omap-get-vals in=16b] snapc 0=[] ondisk+read+known_if_redirected+supports_pool_eio e50)
Jan 22 15:46:09 compute-2 ceph-osd[79779]: log_channel(cluster) log [WRN] : 62 slow requests (by type [ 'delayed' : 62 ] most affected pool [ 'vms' : 37 ])
Jan 22 15:46:10 compute-2 ceph-088fe176-0106-5401-803c-2da38b73b76a-osd-2[79775]: 2026-01-22T15:46:10.863+0000 7f47f8ed4640 -1 osd.2 183 get_health_metrics reporting 62 slow ops, oldest is osd_op(client.14140.0:10 2.12 2:4e99cc3e:::rbd_mirror_snapshot_schedule:head [omap-get-vals in=16b] snapc 0=[] ondisk+read+known_if_redirected+supports_pool_eio e50)
Jan 22 15:46:10 compute-2 ceph-osd[79779]: osd.2 183 get_health_metrics reporting 62 slow ops, oldest is osd_op(client.14140.0:10 2.12 2:4e99cc3e:::rbd_mirror_snapshot_schedule:head [omap-get-vals in=16b] snapc 0=[] ondisk+read+known_if_redirected+supports_pool_eio e50)
Jan 22 15:46:10 compute-2 ceph-osd[79779]: log_channel(cluster) log [WRN] : 62 slow requests (by type [ 'delayed' : 62 ] most affected pool [ 'vms' : 37 ])
Jan 22 15:46:11 compute-2 ceph-088fe176-0106-5401-803c-2da38b73b76a-osd-2[79775]: 2026-01-22T15:46:11.911+0000 7f47f8ed4640 -1 osd.2 183 get_health_metrics reporting 62 slow ops, oldest is osd_op(client.14140.0:10 2.12 2:4e99cc3e:::rbd_mirror_snapshot_schedule:head [omap-get-vals in=16b] snapc 0=[] ondisk+read+known_if_redirected+supports_pool_eio e50)
Jan 22 15:46:11 compute-2 ceph-osd[79779]: osd.2 183 get_health_metrics reporting 62 slow ops, oldest is osd_op(client.14140.0:10 2.12 2:4e99cc3e:::rbd_mirror_snapshot_schedule:head [omap-get-vals in=16b] snapc 0=[] ondisk+read+known_if_redirected+supports_pool_eio e50)
Jan 22 15:46:11 compute-2 ceph-osd[79779]: log_channel(cluster) log [WRN] : 62 slow requests (by type [ 'delayed' : 62 ] most affected pool [ 'vms' : 37 ])

● ceph-088fe176-0106-5401-803c-2da38b73b76a@rgw.rgw.compute-2.gfsxzw.service - Ceph rgw.rgw.compute-2.gfsxzw for 088fe176-0106-5401-803c-2da38b73b76a
     Loaded: loaded (/etc/systemd/system/ceph-088fe176-0106-5401-803c-2da38b73b76a@.service; enabled; preset: disabled)
     Active: active (running) since Thu 2026-01-22 13:36:37 UTC; 2h 9min ago
   Main PID: 80765 (conmon)
         IO: 0B read, 8.5M written
      Tasks: 605 (limit: 48560)
     Memory: 123.2M (peak: 124.4M)
        CPU: 51.964s
     CGroup: /system.slice/system-ceph\x2d088fe176\x2d0106\x2d5401\x2d803c\x2d2da38b73b76a.slice/ceph-088fe176-0106-5401-803c-2da38b73b76a@rgw.rgw.compute-2.gfsxzw.service
             ├─libpod-payload-49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0
             │ ├─80767 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-2.gfsxzw -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─80769 /usr/bin/radosgw -n client.rgw.rgw.compute-2.gfsxzw -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─80765 /usr/bin/conmon --api-version 1 -c 49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0 -u 49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0/userdata -p /run/containers/storage/overlay-containers/49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-rgw-rgw-compute-2-gfsxzw --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@rgw.rgw.compute-2.gfsxzw.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0

Jan 22 15:46:09 compute-2 radosgw[80769]: beast: 0x7f935e56e6f0: 192.168.122.102 - anonymous [22/Jan/2026:15:46:09.410 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.000000000s
Jan 22 15:46:09 compute-2 radosgw[80769]: ====== starting new request req=0x7f935e56e6f0 =====
Jan 22 15:46:09 compute-2 radosgw[80769]: ====== req done req=0x7f935e56e6f0 op status=0 http_status=200 latency=0.000000000s ======
Jan 22 15:46:09 compute-2 radosgw[80769]: beast: 0x7f935e56e6f0: 192.168.122.100 - anonymous [22/Jan/2026:15:46:09.960 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.000000000s
Jan 22 15:46:11 compute-2 radosgw[80769]: ====== starting new request req=0x7f935e56e6f0 =====
Jan 22 15:46:11 compute-2 radosgw[80769]: ====== req done req=0x7f935e56e6f0 op status=0 http_status=200 latency=0.000000000s ======
Jan 22 15:46:11 compute-2 radosgw[80769]: beast: 0x7f935e56e6f0: 192.168.122.102 - anonymous [22/Jan/2026:15:46:11.412 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.000000000s
Jan 22 15:46:11 compute-2 radosgw[80769]: ====== starting new request req=0x7f935e56e6f0 =====
Jan 22 15:46:11 compute-2 radosgw[80769]: ====== req done req=0x7f935e56e6f0 op status=0 http_status=200 latency=0.000000000s ======
Jan 22 15:46:11 compute-2 radosgw[80769]: beast: 0x7f935e56e6f0: 192.168.122.100 - anonymous [22/Jan/2026:15:46:11.962 +0000] "HEAD / HTTP/1.0" 200 0 - - - latency=0.000000000s

● ceph-osd-losetup-0.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-0.service; enabled; preset: disabled)
     Active: active (exited) since Thu 2026-01-22 13:31:02 UTC; 2h 15min ago
   Main PID: 72530 (code=exited, status=0/SUCCESS)
        CPU: 25ms

Jan 22 13:31:02 compute-2 systemd[1]: Starting Ceph OSD losetup...
Jan 22 13:31:02 compute-2 bash[72531]: /dev/loop3: [64513]:4328449 (/var/lib/ceph-osd-0.img)
Jan 22 13:31:02 compute-2 systemd[1]: Finished Ceph OSD losetup.

● chronyd.service - NTP client/server
     Loaded: loaded (/usr/lib/systemd/system/chronyd.service; enabled; preset: enabled)
     Active: active (running) since Thu 2026-01-22 13:28:34 UTC; 2h 17min ago
       Docs: man:chronyd(8)
             man:chrony.conf(5)
   Main PID: 58561 (chronyd)
         IO: 0B read, 8.0K written
      Tasks: 1 (limit: 48560)
     Memory: 1.0M (peak: 1.9M)
        CPU: 92ms
     CGroup: /system.slice/chronyd.service
             └─58561 /usr/sbin/chronyd -F 2

Jan 22 13:28:34 compute-2 systemd[1]: Starting NTP client/server...
Jan 22 13:28:34 compute-2 chronyd[58561]: chronyd version 4.8 starting (+CMDMON +REFCLOCK +RTC +PRIVDROP +SCFILTER +SIGND +NTS +SECHASH +IPV6 +DEBUG)
Jan 22 13:28:34 compute-2 chronyd[58561]: Frequency -26.130 +/- 0.081 ppm read from /var/lib/chrony/drift
Jan 22 13:28:34 compute-2 chronyd[58561]: Loaded seccomp filter (level 2)
Jan 22 13:28:34 compute-2 systemd[1]: Started NTP client/server.
Jan 22 13:30:44 compute-2 chronyd[58561]: Selected source 167.160.187.179 (pool.ntp.org)

● cloud-config.service - Cloud-init: Config Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.service; enabled; preset: disabled)
     Active: active (exited) since Thu 2026-01-22 12:49:18 UTC; 2h 56min ago
   Main PID: 999 (code=exited, status=0/SUCCESS)
        CPU: 383ms

Jan 22 12:49:18 np0005592159.novalocal systemd[1]: Starting Cloud-init: Config Stage...
Jan 22 12:49:18 np0005592159.novalocal cloud-init[1129]: Cloud-init v. 24.4-8.el9 running 'modules:config' at Thu, 22 Jan 2026 12:49:18 +0000. Up 9.51 seconds.
Jan 22 12:49:18 np0005592159.novalocal systemd[1]: Finished Cloud-init: Config Stage.

● cloud-final.service - Cloud-init: Final Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-final.service; enabled; preset: disabled)
     Active: active (exited) since Thu 2026-01-22 12:49:19 UTC; 2h 56min ago
   Main PID: 1155 (code=exited, status=0/SUCCESS)
        CPU: 460ms

Jan 22 12:49:19 np0005592159.novalocal cloud-init[1272]: Cloud-init v. 24.4-8.el9 running 'modules:final' at Thu, 22 Jan 2026 12:49:19 +0000. Up 9.87 seconds.
Jan 22 12:49:19 np0005592159.novalocal cloud-init[1282]: #############################################################
Jan 22 12:49:19 np0005592159.novalocal cloud-init[1283]: -----BEGIN SSH HOST KEY FINGERPRINTS-----
Jan 22 12:49:19 np0005592159.novalocal cloud-init[1285]: 256 SHA256:N07ntx9a6ee1wXNKZCLxHO+EmOEsTiu/ut92zp8Te3Q root@np0005592159.novalocal (ECDSA)
Jan 22 12:49:19 np0005592159.novalocal cloud-init[1287]: 256 SHA256:1FUgzkEXvMlB1Efuil9gJ1N6+xIx3oHGUMLDz4R4tWE root@np0005592159.novalocal (ED25519)
Jan 22 12:49:19 np0005592159.novalocal cloud-init[1289]: 3072 SHA256:ZrAkF9Xrv+nsA28p+s/bLd5i3L5ajk6r69DLcZjH8XE root@np0005592159.novalocal (RSA)
Jan 22 12:49:19 np0005592159.novalocal cloud-init[1290]: -----END SSH HOST KEY FINGERPRINTS-----
Jan 22 12:49:19 np0005592159.novalocal cloud-init[1291]: #############################################################
Jan 22 12:49:19 np0005592159.novalocal cloud-init[1272]: Cloud-init v. 24.4-8.el9 finished at Thu, 22 Jan 2026 12:49:19 +0000. Datasource DataSourceConfigDrive [net,ver=2][source=/dev/sr0].  Up 10.08 seconds
Jan 22 12:49:19 np0005592159.novalocal systemd[1]: Finished Cloud-init: Final Stage.

● cloud-init-local.service - Cloud-init: Local Stage (pre-network)
     Loaded: loaded (/usr/lib/systemd/system/cloud-init-local.service; enabled; preset: disabled)
     Active: active (exited) since Thu 2026-01-22 12:49:16 UTC; 2h 56min ago
   Main PID: 774 (code=exited, status=0/SUCCESS)
        CPU: 758ms

Jan 22 12:49:15 localhost systemd[1]: Starting Cloud-init: Local Stage (pre-network)...
Jan 22 12:49:15 localhost cloud-init[836]: Cloud-init v. 24.4-8.el9 running 'init-local' at Thu, 22 Jan 2026 12:49:15 +0000. Up 6.40 seconds.
Jan 22 12:49:16 np0005592159.novalocal systemd[1]: Finished Cloud-init: Local Stage (pre-network).

● cloud-init.service - Cloud-init: Network Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.service; enabled; preset: disabled)
     Active: active (exited) since Thu 2026-01-22 12:49:18 UTC; 2h 56min ago
   Main PID: 898 (code=exited, status=0/SUCCESS)
        CPU: 1.160s

Jan 22 12:49:18 np0005592159.novalocal cloud-init[918]: |         = %=.+..|
Jan 22 12:49:18 np0005592159.novalocal cloud-init[918]: |        . =.B=..o|
Jan 22 12:49:18 np0005592159.novalocal cloud-init[918]: |       .    +*o= |
Jan 22 12:49:18 np0005592159.novalocal cloud-init[918]: |        S   ..*+=|
Jan 22 12:49:18 np0005592159.novalocal cloud-init[918]: |             ooBo|
Jan 22 12:49:18 np0005592159.novalocal cloud-init[918]: |            . .o.|
Jan 22 12:49:18 np0005592159.novalocal cloud-init[918]: |             ....|
Jan 22 12:49:18 np0005592159.novalocal cloud-init[918]: |              ...|
Jan 22 12:49:18 np0005592159.novalocal cloud-init[918]: +----[SHA256]-----+
Jan 22 12:49:18 np0005592159.novalocal systemd[1]: Finished Cloud-init: Network Stage.

○ cpupower.service - Configure CPU power related settings
     Loaded: loaded (/usr/lib/systemd/system/cpupower.service; disabled; preset: disabled)
     Active: inactive (dead)

● crond.service - Command Scheduler
     Loaded: loaded (/usr/lib/systemd/system/crond.service; enabled; preset: enabled)
     Active: active (running) since Thu 2026-01-22 12:49:18 UTC; 2h 56min ago
   Main PID: 1006 (crond)
         IO: 208.0K read, 12.0K written
      Tasks: 1 (limit: 48560)
     Memory: 1.3M (peak: 5.1M)
        CPU: 277ms
     CGroup: /system.slice/crond.service
             └─1006 /usr/sbin/crond -n

Jan 22 13:09:01 compute-2 anacron[8202]: Job `cron.daily' terminated
Jan 22 13:29:01 compute-2 anacron[8202]: Job `cron.weekly' started
Jan 22 13:29:01 compute-2 anacron[8202]: Job `cron.weekly' terminated
Jan 22 13:49:01 compute-2 anacron[8202]: Job `cron.monthly' started
Jan 22 13:49:01 compute-2 anacron[8202]: Job `cron.monthly' terminated
Jan 22 13:49:01 compute-2 anacron[8202]: Normal exit (3 jobs run)
Jan 22 14:01:01 compute-2 CROND[230019]: (root) CMD (run-parts /etc/cron.hourly)
Jan 22 14:01:01 compute-2 CROND[230018]: (root) CMDEND (run-parts /etc/cron.hourly)
Jan 22 15:01:01 compute-2 CROND[267579]: (root) CMD (run-parts /etc/cron.hourly)
Jan 22 15:01:01 compute-2 CROND[267578]: (root) CMDEND (run-parts /etc/cron.hourly)

● dbus-broker.service - D-Bus System Message Bus
     Loaded: loaded (/usr/lib/systemd/system/dbus-broker.service; enabled; preset: enabled)
     Active: active (running) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
TriggeredBy: ● dbus.socket
       Docs: man:dbus-broker-launch(1)
   Main PID: 760 (dbus-broker-lau)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48560)
     Memory: 2.9M (peak: 3.7M)
        CPU: 5.435s
     CGroup: /system.slice/dbus-broker.service
             ├─760 /usr/bin/dbus-broker-launch --scope system --audit
             └─772 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
Unit display-manager.service could not be found.

Jan 22 13:25:38 compute-2 dbus-broker-launch[760]: Noticed file-system modification, trigger reload.
Jan 22 13:26:26 compute-2 dbus-broker-launch[772]: avc:  op=load_policy lsm=selinux seqno=7 res=1
Jan 22 13:26:41 compute-2 dbus-broker-launch[772]: avc:  op=load_policy lsm=selinux seqno=8 res=1
Jan 22 13:44:17 compute-2 dbus-broker-launch[772]: avc:  op=load_policy lsm=selinux seqno=9 res=1
Jan 22 13:48:16 compute-2 dbus-broker-launch[772]: avc:  op=load_policy lsm=selinux seqno=10 res=1
Jan 22 13:48:30 compute-2 dbus-broker-launch[772]: avc:  op=load_policy lsm=selinux seqno=11 res=1
Jan 22 13:49:19 compute-2 dbus-broker-launch[760]: Noticed file-system modification, trigger reload.
Jan 22 13:49:19 compute-2 dbus-broker-launch[772]: avc:  op=load_policy lsm=selinux seqno=12 res=1
Jan 22 13:49:19 compute-2 dbus-broker-launch[760]: Noticed file-system modification, trigger reload.
Jan 22 13:51:12 compute-2 dbus-broker-launch[772]: avc:  op=load_policy lsm=selinux seqno=13 res=1

○ dm-event.service - Device-mapper event daemon
     Loaded: loaded (/usr/lib/systemd/system/dm-event.service; static)
     Active: inactive (dead)
TriggeredBy: ● dm-event.socket
       Docs: man:dmeventd(8)

○ dnf-makecache.service - dnf makecache
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.service; static)
     Active: inactive (dead) since Thu 2026-01-22 15:07:26 UTC; 38min ago
TriggeredBy: ● dnf-makecache.timer
    Process: 270337 ExecStart=/usr/bin/dnf makecache --timer (code=exited, status=0/SUCCESS)
   Main PID: 270337 (code=exited, status=0/SUCCESS)
        CPU: 235ms

Jan 22 15:07:25 compute-2 systemd[1]: Starting dnf makecache...
Jan 22 15:07:26 compute-2 dnf[270337]: Metadata cache refreshed recently.
Jan 22 15:07:26 compute-2 systemd[1]: dnf-makecache.service: Deactivated successfully.
Jan 22 15:07:26 compute-2 systemd[1]: Finished dnf makecache.

○ dracut-cmdline.service - dracut cmdline hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-cmdline.service; static)
     Active: inactive (dead) since Thu 2026-01-22 12:49:13 UTC; 2h 56min ago
   Duration: 1.977s
       Docs: man:dracut-cmdline.service(8)
             man:dracut.bootup(7)
   Main PID: 328 (code=exited, status=0/SUCCESS)
        CPU: 199ms

Jan 22 12:49:11 localhost systemd[1]: Starting dracut cmdline hook...
Jan 22 12:49:11 localhost dracut-cmdline[328]: dracut-9 dracut-057-102.git20250818.el9
Jan 22 12:49:11 localhost dracut-cmdline[328]: Using kernel command line parameters:    BOOT_IMAGE=(hd0,msdos1)/boot/vmlinuz-5.14.0-661.el9.x86_64 root=UUID=22ac9141-3960-4912-b20e-19fc8a328d40 ro console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=1G-2G:192M,2G-64G:256M,64G-:512M
Jan 22 12:49:11 localhost systemd[1]: Finished dracut cmdline hook.
Jan 22 12:49:13 localhost systemd[1]: dracut-cmdline.service: Deactivated successfully.
Jan 22 12:49:13 localhost systemd[1]: Stopped dracut cmdline hook.

○ dracut-initqueue.service - dracut initqueue hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-initqueue.service; static)
     Active: inactive (dead) since Thu 2026-01-22 12:49:13 UTC; 2h 56min ago
   Duration: 797ms
       Docs: man:dracut-initqueue.service(8)
             man:dracut.bootup(7)
   Main PID: 505 (code=exited, status=0/SUCCESS)
        CPU: 42ms

Jan 22 12:49:12 localhost systemd[1]: Starting dracut initqueue hook...
Jan 22 12:49:12 localhost systemd[1]: Finished dracut initqueue hook.
Jan 22 12:49:13 localhost systemd[1]: dracut-initqueue.service: Deactivated successfully.
Jan 22 12:49:13 localhost systemd[1]: Stopped dracut initqueue hook.

○ dracut-mount.service - dracut mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-mount.service; static)
     Active: inactive (dead) since Thu 2026-01-22 12:49:13 UTC; 2h 56min ago
   Duration: 138ms
       Docs: man:dracut-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 569 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Jan 22 12:49:13 localhost systemd[1]: Starting dracut mount hook...
Jan 22 12:49:13 localhost systemd[1]: Finished dracut mount hook.
Jan 22 12:49:13 localhost systemd[1]: dracut-mount.service: Deactivated successfully.
Jan 22 12:49:13 localhost systemd[1]: Stopped dracut mount hook.

○ dracut-pre-mount.service - dracut pre-mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-mount.service; static)
     Active: inactive (dead) since Thu 2026-01-22 12:49:13 UTC; 2h 56min ago
   Duration: 753ms
       Docs: man:dracut-pre-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 548 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Jan 22 12:49:12 localhost systemd[1]: Starting dracut pre-mount hook...
Jan 22 12:49:12 localhost systemd[1]: Finished dracut pre-mount hook.
Jan 22 12:49:13 localhost systemd[1]: dracut-pre-mount.service: Deactivated successfully.
Jan 22 12:49:13 localhost systemd[1]: Stopped dracut pre-mount hook.

○ dracut-pre-pivot.service - dracut pre-pivot and cleanup hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-pivot.service; static)
     Active: inactive (dead) since Thu 2026-01-22 12:49:13 UTC; 2h 56min ago
   Duration: 36ms
       Docs: man:dracut-pre-pivot.service(8)
             man:dracut.bootup(7)
   Main PID: 574 (code=exited, status=0/SUCCESS)
        CPU: 82ms

Jan 22 12:49:13 localhost systemd[1]: Starting dracut pre-pivot and cleanup hook...
Jan 22 12:49:13 localhost systemd[1]: Finished dracut pre-pivot and cleanup hook.
Jan 22 12:49:13 localhost systemd[1]: dracut-pre-pivot.service: Deactivated successfully.
Jan 22 12:49:13 localhost systemd[1]: Stopped dracut pre-pivot and cleanup hook.

○ dracut-pre-trigger.service - dracut pre-trigger hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-trigger.service; static)
     Active: inactive (dead) since Thu 2026-01-22 12:49:13 UTC; 2h 56min ago
   Duration: 1.414s
       Docs: man:dracut-pre-trigger.service(8)
             man:dracut.bootup(7)
   Main PID: 468 (code=exited, status=0/SUCCESS)
        CPU: 32ms

Jan 22 12:49:12 localhost systemd[1]: Starting dracut pre-trigger hook...
Jan 22 12:49:12 localhost systemd[1]: Finished dracut pre-trigger hook.
Jan 22 12:49:13 localhost systemd[1]: dracut-pre-trigger.service: Deactivated successfully.
Jan 22 12:49:13 localhost systemd[1]: Stopped dracut pre-trigger hook.

○ dracut-pre-udev.service - dracut pre-udev hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-udev.service; static)
     Active: inactive (dead) since Thu 2026-01-22 12:49:13 UTC; 2h 56min ago
   Duration: 1.579s
       Docs: man:dracut-pre-udev.service(8)
             man:dracut.bootup(7)
   Main PID: 417 (code=exited, status=0/SUCCESS)
        CPU: 390ms

Jan 22 12:49:11 localhost systemd[1]: Starting dracut pre-udev hook...
Jan 22 12:49:12 localhost rpc.statd[445]: Version 2.5.4 starting
Jan 22 12:49:12 localhost rpc.statd[445]: Initializing NSM state
Jan 22 12:49:12 localhost rpc.idmapd[450]: Setting log level to 0
Jan 22 12:49:12 localhost systemd[1]: Finished dracut pre-udev hook.
Jan 22 12:49:13 localhost rpc.idmapd[450]: exiting on signal 15
Jan 22 12:49:13 localhost systemd[1]: dracut-pre-udev.service: Deactivated successfully.
Jan 22 12:49:13 localhost systemd[1]: Stopped dracut pre-udev hook.

○ dracut-shutdown-onfailure.service - Service executing upon dracut-shutdown failure to perform cleanup
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown-onfailure.service; static)
     Active: inactive (dead)
       Docs: man:dracut-shutdown.service(8)

● dracut-shutdown.service - Restore /run/initramfs on shutdown
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown.service; static)
     Active: active (exited) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
       Docs: man:dracut-shutdown.service(8)
   Main PID: 777 (code=exited, status=0/SUCCESS)
        CPU: 5ms

Jan 22 12:49:15 localhost systemd[1]: Starting Restore /run/initramfs on shutdown...
Jan 22 12:49:15 localhost systemd[1]: Finished Restore /run/initramfs on shutdown.

● edpm-container-shutdown.service - EDPM Container Shutdown
     Loaded: loaded (/etc/systemd/system/edpm-container-shutdown.service; enabled; preset: enabled)
     Active: active (exited) since Thu 2026-01-22 13:29:08 UTC; 2h 17min ago
       Docs: https://github.com/openstack-k8s-operators/docs
   Main PID: 61561 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Jan 22 13:29:08 compute-2 systemd[1]: Starting EDPM Container Shutdown...
Jan 22 13:29:08 compute-2 systemd[1]: Finished EDPM Container Shutdown.

○ edpm_libvirt_guests.service - Suspend libvirt Guests in edpm
     Loaded: loaded (/etc/systemd/system/edpm_libvirt_guests.service; enabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:libvirtd(8)
             https://libvirt.org

● edpm_nova_compute.service - nova_compute container
     Loaded: loaded (/etc/systemd/system/edpm_nova_compute.service; enabled; preset: disabled)
     Active: active (running) since Thu 2026-01-22 13:55:22 UTC; 1h 50min ago
    Process: 226417 ExecStart=/usr/bin/podman start nova_compute (code=exited, status=0/SUCCESS)
   Main PID: 226433 (conmon)
         IO: 0B read, 94.5K written
      Tasks: 1 (limit: 48560)
     Memory: 684.0K (peak: 16.7M)
        CPU: 570ms
     CGroup: /system.slice/edpm_nova_compute.service
             └─226433 /usr/bin/conmon --api-version 1 -c 572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649 -u 572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649/userdata -p /run/containers/storage/overlay-containers/572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649

Jan 22 14:34:33 compute-2 nova_compute[226433]: 2026-01-22 14:34:33.492 226437 DEBUG nova.virt.hardware [None req-34eb87e6-2213-4316-8175-f06c39b79e38 3b8229aedbc64b9691880a91d559e987 7efa67e548af42419a603e06c3b85f6d - - default default] Flavor limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:348[00m
Jan 22 14:34:33 compute-2 nova_compute[226433]: 2026-01-22 14:34:33.492 226437 DEBUG nova.virt.hardware [None req-34eb87e6-2213-4316-8175-f06c39b79e38 3b8229aedbc64b9691880a91d559e987 7efa67e548af42419a603e06c3b85f6d - - default default] Image limits 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:352[00m
Jan 22 14:34:33 compute-2 nova_compute[226433]: 2026-01-22 14:34:33.492 226437 DEBUG nova.virt.hardware [None req-34eb87e6-2213-4316-8175-f06c39b79e38 3b8229aedbc64b9691880a91d559e987 7efa67e548af42419a603e06c3b85f6d - - default default] Flavor pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:388[00m
Jan 22 14:34:33 compute-2 nova_compute[226433]: 2026-01-22 14:34:33.492 226437 DEBUG nova.virt.hardware [None req-34eb87e6-2213-4316-8175-f06c39b79e38 3b8229aedbc64b9691880a91d559e987 7efa67e548af42419a603e06c3b85f6d - - default default] Image pref 0:0:0 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:392[00m
Jan 22 14:34:33 compute-2 nova_compute[226433]: 2026-01-22 14:34:33.493 226437 DEBUG nova.virt.hardware [None req-34eb87e6-2213-4316-8175-f06c39b79e38 3b8229aedbc64b9691880a91d559e987 7efa67e548af42419a603e06c3b85f6d - - default default] Chose sockets=0, cores=0, threads=0; limits were sockets=65536, cores=65536, threads=65536 get_cpu_topology_constraints /usr/lib/python3.9/site-packages/nova/virt/hardware.py:430[00m
Jan 22 14:34:33 compute-2 nova_compute[226433]: 2026-01-22 14:34:33.493 226437 DEBUG nova.virt.hardware [None req-34eb87e6-2213-4316-8175-f06c39b79e38 3b8229aedbc64b9691880a91d559e987 7efa67e548af42419a603e06c3b85f6d - - default default] Topology preferred VirtCPUTopology(cores=0,sockets=0,threads=0), maximum VirtCPUTopology(cores=65536,sockets=65536,threads=65536) _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:569[00m
Jan 22 14:34:33 compute-2 nova_compute[226433]: 2026-01-22 14:34:33.493 226437 DEBUG nova.virt.hardware [None req-34eb87e6-2213-4316-8175-f06c39b79e38 3b8229aedbc64b9691880a91d559e987 7efa67e548af42419a603e06c3b85f6d - - default default] Build topologies for 1 vcpu(s) 1:1:1 _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:471[00m
Jan 22 14:34:33 compute-2 nova_compute[226433]: 2026-01-22 14:34:33.494 226437 DEBUG nova.virt.hardware [None req-34eb87e6-2213-4316-8175-f06c39b79e38 3b8229aedbc64b9691880a91d559e987 7efa67e548af42419a603e06c3b85f6d - - default default] Got 1 possible topologies _get_possible_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:501[00m
Jan 22 14:34:33 compute-2 nova_compute[226433]: 2026-01-22 14:34:33.494 226437 DEBUG nova.virt.hardware [None req-34eb87e6-2213-4316-8175-f06c39b79e38 3b8229aedbc64b9691880a91d559e987 7efa67e548af42419a603e06c3b85f6d - - default default] Possible topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:575[00m
Jan 22 14:34:33 compute-2 nova_compute[226433]: 2026-01-22 14:34:33.494 226437 DEBUG nova.virt.hardware [None req-34eb87e6-2213-4316-8175-f06c39b79e38 3b8229aedbc64b9691880a91d559e987 7efa67e548af42419a603e06c3b85f6d - - default default] Sorted desired topologies [VirtCPUTopology(cores=1,sockets=1,threads=1)] _get_desirable_cpu_topologies /usr/lib/python3.9/site-packages/nova/virt/hardware.py:577[00m

● edpm_ovn_controller.service - ovn_controller container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_controller.service; enabled; preset: disabled)
     Active: active (running) since Thu 2026-01-22 13:45:27 UTC; 2h 0min ago
   Main PID: 133156 (conmon)
         IO: 0B read, 127.5K written
      Tasks: 1 (limit: 48560)
     Memory: 692.0K (peak: 18.4M)
        CPU: 220ms
     CGroup: /system.slice/edpm_ovn_controller.service
             └─133156 /usr/bin/conmon --api-version 1 -c 8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356 -u 8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356/userdata -p /run/containers/storage/overlay-containers/8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356

Jan 22 14:38:53 compute-2 ovn_controller[133156]: 2026-01-22T14:38:53Z|00069|binding|INFO|Releasing lport 3c983055-ff9e-4976-9d9f-e2b4b8598736 from this chassis (sb_readonly=0)
Jan 22 14:38:59 compute-2 ovn_controller[133156]: 2026-01-22T14:38:59Z|00070|binding|INFO|Releasing lport 3c983055-ff9e-4976-9d9f-e2b4b8598736 from this chassis (sb_readonly=0)
Jan 22 14:39:43 compute-2 ovn_controller[133156]: 2026-01-22T14:39:43Z|00071|binding|INFO|Releasing lport 3c983055-ff9e-4976-9d9f-e2b4b8598736 from this chassis (sb_readonly=0)
Jan 22 14:39:43 compute-2 ovn_controller[133156]: 2026-01-22T14:39:43Z|00072|binding|INFO|Releasing lport 3c983055-ff9e-4976-9d9f-e2b4b8598736 from this chassis (sb_readonly=0)
Jan 22 14:39:51 compute-2 ovn_controller[133156]: 2026-01-22T14:39:51Z|00073|binding|INFO|Releasing lport 3c983055-ff9e-4976-9d9f-e2b4b8598736 from this chassis (sb_readonly=0)
Jan 22 14:40:03 compute-2 ovn_controller[133156]: 2026-01-22T14:40:03Z|00074|binding|INFO|Removing iface tape581f563-33 ovn-installed in OVS
Jan 22 14:40:03 compute-2 ovn_controller[133156]: 2026-01-22T14:40:03Z|00075|binding|INFO|Removing lport e581f563-3369-4b65-92c8-89785e787b51 ovn-installed in OVS
Jan 22 14:41:07 compute-2 ovn_controller[133156]: 2026-01-22T14:41:07Z|00076|memory_trim|INFO|Detected inactivity (last active 30002 ms ago): trimming memory
Jan 22 14:46:00 compute-2 ovn_controller[133156]: 2026-01-22T14:46:00Z|00077|memory_trim|INFO|Detected inactivity (last active 30001 ms ago): trimming memory
Jan 22 14:51:30 compute-2 ovn_controller[133156]: 2026-01-22T14:51:30Z|00078|memory_trim|INFO|Detected inactivity (last active 30001 ms ago): trimming memory

● edpm_ovn_metadata_agent.service - ovn_metadata_agent container
     Loaded: loaded (/etc/systemd/system/edpm_ovn_metadata_agent.service; enabled; preset: disabled)
     Active: active (running) since Thu 2026-01-22 13:46:45 UTC; 1h 59min ago
   Main PID: 143492 (conmon)
         IO: 0B read, 133.5K written
      Tasks: 1 (limit: 48560)
     Memory: 724.0K (peak: 18.0M)
        CPU: 394ms
     CGroup: /system.slice/edpm_ovn_metadata_agent.service
             └─143492 /usr/bin/conmon --api-version 1 -c 65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d -u 65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d/userdata -p /run/containers/storage/overlay-containers/65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d

Jan 22 15:42:47 compute-2 ovn_metadata_agent[143492]: 2026-01-22 15:42:47.284 143497 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Jan 22 15:43:47 compute-2 ovn_metadata_agent[143492]: 2026-01-22 15:43:47.285 143497 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Jan 22 15:43:47 compute-2 ovn_metadata_agent[143492]: 2026-01-22 15:43:47.285 143497 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Jan 22 15:43:47 compute-2 ovn_metadata_agent[143492]: 2026-01-22 15:43:47.286 143497 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Jan 22 15:44:47 compute-2 ovn_metadata_agent[143492]: 2026-01-22 15:44:47.286 143497 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Jan 22 15:44:47 compute-2 ovn_metadata_agent[143492]: 2026-01-22 15:44:47.287 143497 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.001s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Jan 22 15:44:47 compute-2 ovn_metadata_agent[143492]: 2026-01-22 15:44:47.287 143497 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Jan 22 15:45:47 compute-2 ovn_metadata_agent[143492]: 2026-01-22 15:45:47.287 143497 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Unit fcoe.service could not be found.
Unit hv_kvp_daemon.service could not be found.
Jan 22 15:45:47 compute-2 ovn_metadata_agent[143492]: 2026-01-22 15:45:47.287 143497 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Jan 22 15:45:47 compute-2 ovn_metadata_agent[143492]: 2026-01-22 15:45:47.287 143497 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m

○ emergency.service - Emergency Shell
     Loaded: loaded (/usr/lib/systemd/system/emergency.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

● getty@tty1.service - Getty on tty1
     Loaded: loaded (/usr/lib/systemd/system/getty@.service; enabled; preset: enabled)
     Active: active (running) since Thu 2026-01-22 12:49:18 UTC; 2h 56min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1007 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 284.0K (peak: 760.0K)
        CPU: 8ms
     CGroup: /system.slice/system-getty.slice/getty@tty1.service
             └─1007 /sbin/agetty -o "-p -- \\u" --noclear - linux

Jan 22 12:49:18 np0005592159.novalocal systemd[1]: Started Getty on tty1.

● gssproxy.service - GSSAPI Proxy Daemon
     Loaded: loaded (/usr/lib/systemd/system/gssproxy.service; disabled; preset: disabled)
     Active: active (running) since Thu 2026-01-22 12:49:16 UTC; 2h 56min ago
   Main PID: 868 (gssproxy)
         IO: 0B read, 0B written
      Tasks: 6 (limit: 48560)
     Memory: 1.8M (peak: 3.6M)
        CPU: 25ms
     CGroup: /system.slice/gssproxy.service
             └─868 /usr/sbin/gssproxy -D

Jan 22 12:49:16 np0005592159.novalocal systemd[1]: Starting GSSAPI Proxy Daemon...
Jan 22 12:49:16 np0005592159.novalocal systemd[1]: Started GSSAPI Proxy Daemon.

○ import-state.service - Import network configuration from initramfs
     Loaded: loaded (/usr/lib/systemd/system/import-state.service; enabled; preset: enabled)
     Active: inactive (dead)

○ initrd-cleanup.service - Cleaning Up and Shutting Down Daemons
     Loaded: loaded (/usr/lib/systemd/system/initrd-cleanup.service; static)
     Active: inactive (dead) since Thu 2026-01-22 12:49:13 UTC; 2h 56min ago
   Main PID: 615 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Jan 22 12:49:13 localhost systemd[1]: Starting Cleaning Up and Shutting Down Daemons...
Jan 22 12:49:13 localhost systemd[1]: initrd-cleanup.service: Deactivated successfully.
Jan 22 12:49:13 localhost systemd[1]: Finished Cleaning Up and Shutting Down Daemons.

○ initrd-parse-etc.service - Mountpoints Configured in the Real Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-parse-etc.service; static)
     Active: inactive (dead) since Thu 2026-01-22 12:49:13 UTC; 2h 56min ago
   Main PID: 568 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Jan 22 12:49:13 localhost systemd[1]: Starting Mountpoints Configured in the Real Root...
Jan 22 12:49:13 localhost systemd[1]: initrd-parse-etc.service: Deactivated successfully.
Jan 22 12:49:13 localhost systemd[1]: Finished Mountpoints Configured in the Real Root.

○ initrd-switch-root.service - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.service; static)
     Active: inactive (dead) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
   Main PID: 619 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Jan 22 12:49:13 localhost systemd[1]: Starting Switch Root...

○ initrd-udevadm-cleanup-db.service - Cleanup udev Database
     Loaded: loaded (/usr/lib/systemd/system/initrd-udevadm-cleanup-db.service; static)
     Active: inactive (dead) since Thu 2026-01-22 12:49:13 UTC; 2h 56min ago
   Main PID: 618 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Jan 22 12:49:13 localhost systemd[1]: Starting Cleanup udev Database...
Jan 22 12:49:13 localhost systemd[1]: initrd-udevadm-cleanup-db.service: Deactivated successfully.
Jan 22 12:49:13 localhost systemd[1]: Finished Cleanup udev Database.

○ ip6tables.service - IPv6 firewall with ip6tables
     Loaded: loaded (/usr/lib/systemd/system/ip6tables.service; disabled; preset: disabled)
     Active: inactive (dead)

○ iptables.service - IPv4 firewall with iptables
     Loaded: loaded (/usr/lib/systemd/system/iptables.service; disabled; preset: disabled)
     Active: inactive (dead) since Thu 2026-01-22 13:29:21 UTC; 2h 16min ago
   Duration: 40min 5.185s
   Main PID: 780 (code=exited, status=0/SUCCESS)
        CPU: 101ms

Jan 22 12:49:15 localhost systemd[1]: Starting IPv4 firewall with iptables...
Jan 22 12:49:15 localhost iptables.init[780]: iptables: Applying firewall rules: [  OK  ]
Jan 22 12:49:15 localhost systemd[1]: Finished IPv4 firewall with iptables.
Jan 22 13:29:20 compute-2 systemd[1]: Stopping IPv4 firewall with iptables...
Jan 22 13:29:20 compute-2 iptables.init[62813]: iptables: Setting chains to policy ACCEPT: raw mangle filter nat [  OK  ]
Jan 22 13:29:21 compute-2 iptables.init[62813]: iptables: Flushing firewall rules: [  OK  ]
Jan 22 13:29:21 compute-2 systemd[1]: iptables.service: Deactivated successfully.
Jan 22 13:29:21 compute-2 systemd[1]: Stopped IPv4 firewall with iptables.

● irqbalance.service - irqbalance daemon
     Loaded: loaded (/usr/lib/systemd/system/irqbalance.service; enabled; preset: enabled)
     Active: active (running) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
       Docs: man:irqbalance(1)
             https://github.com/Irqbalance/irqbalance
   Main PID: 785 (irqbalance)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48560)
     Memory: 1.1M (peak: 1.4M)
        CPU: 953ms
     CGroup: /system.slice/irqbalance.service
             └─785 /usr/sbin/irqbalance

Jan 22 12:49:25 np0005592159.novalocal irqbalance[785]: Cannot change IRQ 32 affinity: Operation not permitted
Jan 22 12:49:25 np0005592159.novalocal irqbalance[785]: IRQ 32 affinity is now unmanaged
Jan 22 12:49:25 np0005592159.novalocal irqbalance[785]: Cannot change IRQ 30 affinity: Operation not permitted
Jan 22 12:49:25 np0005592159.novalocal irqbalance[785]: IRQ 30 affinity is now unmanaged
Jan 22 12:49:25 np0005592159.novalocal irqbalance[785]: Cannot change IRQ 29 affinity: Operation not permitted
Jan 22 12:49:25 np0005592159.novalocal irqbalance[785]: IRQ 29 affinity is now unmanaged
Jan 22 13:01:55 np0005592159.novalocal irqbalance[785]: Cannot change IRQ 27 affinity: Operation not permitted
Jan 22 13:01:55 np0005592159.novalocal irqbalance[785]: IRQ 27 affinity is now unmanaged
Jan 22 13:49:15 compute-2 irqbalance[785]: Cannot change IRQ 26 affinity: Operation not permitted
Jan 22 13:49:15 compute-2 irqbalance[785]: IRQ 26 affinity is now unmanaged

○ iscsi-init.service - One time configuration for iscsi.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-init.service; disabled; preset: disabled)
     Active: inactive (dead)
  Condition: start condition failed at Thu 2026-01-22 13:53:29 UTC; 1h 52min ago

Jan 22 13:52:41 compute-2 systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).
Jan 22 13:53:29 compute-2 systemd[1]: One time configuration for iscsi.service was skipped because of an unmet condition check (ConditionPathExists=!/etc/iscsi/initiatorname.iscsi).

○ iscsi-onboot.service - Special handling of early boot iSCSI sessions
     Loaded: loaded (/usr/lib/systemd/system/iscsi-onboot.service; enabled; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsi-shutdown.service - Logout off all iSCSI sessions on shutdown
     Loaded: loaded (/usr/lib/systemd/system/iscsi-shutdown.service; static)
     Active: active (exited) since Thu 2026-01-22 13:52:41 UTC; 1h 53min ago
       Docs: man:iscsid(8)
             man:iscsiadm(8)
   Main PID: 204628 (code=exited, status=0/SUCCESS)
        CPU: 2ms

Jan 22 13:52:41 compute-2 systemd[1]: Starting Logout off all iSCSI sessions on shutdown...
Jan 22 13:52:41 compute-2 systemd[1]: Finished Logout off all iSCSI sessions on shutdown.

○ iscsi-starter.service
     Loaded: loaded (/usr/lib/systemd/system/iscsi-starter.service; enabled; preset: disabled)
     Active: inactive (dead)

○ iscsi.service - Login and scanning of iSCSI devices
     Loaded: loaded (/usr/lib/systemd/system/iscsi.service; indirect; preset: enabled)
     Active: inactive (dead)
       Docs: man:iscsiadm(8)
             man:iscsid(8)

● iscsid.service - Open-iSCSI
     Loaded: loaded (/usr/lib/systemd/system/iscsid.service; enabled; preset: disabled)
     Active: active (running) since Thu 2026-01-22 13:53:29 UTC; 1h 52min ago
TriggeredBy: ● iscsid.socket
       Docs: man:iscsid(8)
             man:iscsiuio(8)
             man:iscsiadm(8)
   Main PID: 211449 (iscsid)
     Status: "Ready to process requests"
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 1.9M (peak: 2.0M)
        CPU: 7ms
     CGroup: /system.slice/iscsid.service
             └─211449 /usr/sbin/iscsid -f

Jan 22 13:53:29 compute-2 systemd[1]: Starting Open-iSCSI...
Jan 22 13:53:29 compute-2 systemd[1]: Started Open-iSCSI.

○ iscsiuio.service - iSCSI UserSpace I/O driver
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ iscsiuio.socket
       Docs: man:iscsiuio(8)

● kdump.service - Crash recovery kernel arming
     Loaded: loaded (/usr/lib/systemd/system/kdump.service; enabled; preset: enabled)
     Active: active (exited) since Thu 2026-01-22 12:49:31 UTC; 2h 56min ago
   Main PID: 1005 (code=exited, status=0/SUCCESS)
        CPU: 18.810s

Jan 22 12:49:30 np0005592159.novalocal dracut[1266]: Linked:         0 files
Jan 22 12:49:30 np0005592159.novalocal dracut[1266]: Compared:       0 xattrs
Jan 22 12:49:30 np0005592159.novalocal dracut[1266]: Compared:       0 files
Jan 22 12:49:30 np0005592159.novalocal dracut[1266]: Saved:          0 B
Jan 22 12:49:30 np0005592159.novalocal dracut[1266]: Duration:       0.000904 seconds
Jan 22 12:49:30 np0005592159.novalocal dracut[1266]: *** Hardlinking files done ***
Jan 22 12:49:31 np0005592159.novalocal dracut[1266]: *** Creating initramfs image file '/boot/initramfs-5.14.0-661.el9.x86_64kdump.img' done ***
Jan 22 12:49:31 np0005592159.novalocal kdumpctl[1015]: kdump: kexec: loaded kdump kernel
Jan 22 12:49:31 np0005592159.novalocal kdumpctl[1015]: kdump: Starting kdump: [OK]
Jan 22 12:49:31 np0005592159.novalocal systemd[1]: Finished Crash recovery kernel arming.

● kmod-static-nodes.service - Create List of Static Device Nodes
     Loaded: loaded (/usr/lib/systemd/system/kmod-static-nodes.service; static)
     Active: active (exited) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
   Main PID: 669 (code=exited, status=0/SUCCESS)
        CPU: 9ms

Jan 22 12:49:14 localhost systemd[1]: Finished Create List of Static Device Nodes.

● ldconfig.service - Rebuild Dynamic Linker Cache
     Loaded: loaded (/usr/lib/systemd/system/ldconfig.service; static)
     Active: active (exited) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:ldconfig(8)
   Main PID: 692 (code=exited, status=0/SUCCESS)
        CPU: 46ms

Jan 22 12:49:14 localhost systemd[1]: Starting Rebuild Dynamic Linker Cache...
Jan 22 12:49:14 localhost systemd[1]: Finished Rebuild Dynamic Linker Cache.

○ libvirtd.service
     Loaded: masked (Reason: Unit libvirtd.service is masked.)
     Active: inactive (dead)
TriggeredBy: ○ libvirtd-ro.socket
             ○ libvirtd.socket
             ○ libvirtd-admin.socket

○ loadmodules.service - Load legacy module configuration
     Loaded: loaded (/usr/lib/systemd/system/loadmodules.service; enabled; preset: enabled)
Unit lvm2-activation-early.service could not be found.
     Active: inactive (dead)

○ logrotate.service - Rotate log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.service; static)
     Active: inactive (dead)
TriggeredBy: ● logrotate.timer
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

○ lvm2-lvmpolld.service - LVM2 poll daemon
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.service; static)
     Active: inactive (dead)
TriggeredBy: ● lvm2-lvmpolld.socket
       Docs: man:lvmpolld(8)

● lvm2-monitor.service - Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling
     Loaded: loaded (/usr/lib/systemd/system/lvm2-monitor.service; enabled; preset: enabled)
     Active: active (exited) since Thu 2026-01-22 13:23:43 UTC; 2h 22min ago
       Docs: man:dmeventd(8)
             man:lvcreate(8)
             man:lvchange(8)
             man:vgchange(8)
   Main PID: 34049 (code=exited, status=0/SUCCESS)
        CPU: 37ms

Jan 22 13:23:43 compute-2 systemd[1]: Starting Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling...
Jan 22 13:23:43 compute-2 systemd[1]: Finished Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling.

○ mdmonitor.service - Software RAID monitoring and management
     Loaded: loaded (/usr/lib/systemd/system/mdmonitor.service; enabled; preset: enabled)
     Active: inactive (dead)

○ microcode.service - Load CPU microcode update
     Loaded: loaded (/usr/lib/systemd/system/microcode.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Thu 2026-01-22 12:49:15 UTC; 2h 56min ago

Jan 22 12:49:15 localhost systemd[1]: Load CPU microcode update was skipped because of an unmet condition check (ConditionPathExists=/sys/devices/system/cpu/microcode/reload).

○ modprobe@configfs.service - Load Kernel Module configfs
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
       Docs: man:modprobe(8)
   Main PID: 766 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Jan 22 12:49:15 localhost systemd[1]: Starting Load Kernel Module configfs...
Jan 22 12:49:15 localhost systemd[1]: modprobe@configfs.service: Deactivated successfully.
Jan 22 12:49:15 localhost systemd[1]: Finished Load Kernel Module configfs.

○ modprobe@drm.service - Load Kernel Module drm
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:modprobe(8)
   Main PID: 671 (code=exited, status=0/SUCCESS)
        CPU: 99ms

Jan 22 12:49:14 localhost systemd[1]: modprobe@drm.service: Deactivated successfully.
Jan 22 12:49:14 localhost systemd[1]: Finished Load Kernel Module drm.

○ modprobe@efi_pstore.service - Load Kernel Module efi_pstore
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:modprobe(8)
   Main PID: 672 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Jan 22 12:49:14 localhost systemd[1]: modprobe@efi_pstore.service: Deactivated successfully.
Jan 22 12:49:14 localhost systemd[1]: Finished Load Kernel Module efi_pstore.

○ modprobe@fuse.service - Load Kernel Module fuse
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:modprobe(8)
   Main PID: 673 (code=exited, status=0/SUCCESS)
        CPU: 22ms

Jan 22 12:49:14 localhost systemd[1]: modprobe@fuse.service: Deactivated successfully.
Jan 22 12:49:14 localhost systemd[1]: Finished Load Kernel Module fuse.

● multipathd.service - Device-Mapper Multipath Device Controller
     Loaded: loaded (/usr/lib/systemd/system/multipathd.service; enabled; preset: enabled)
     Active: active (running) since Thu 2026-01-22 13:53:31 UTC; 1h 52min ago
TriggeredBy: ● multipathd.socket
   Main PID: 211608 (multipathd)
     Status: "up"
         IO: 0B read, 0B written
      Tasks: 7
     Memory: 18.4M (peak: 18.9M)
        CPU: 795ms
     CGroup: /system.slice/multipathd.service
             └─211608 /sbin/multipathd -d -s

Jan 22 13:53:31 compute-2 systemd[1]: Starting Device-Mapper Multipath Device Controller...
Jan 22 13:53:31 compute-2 multipathd[211608]: --------start up--------
Jan 22 13:53:31 compute-2 multipathd[211608]: read /etc/multipath.conf
Jan 22 13:53:31 compute-2 multipathd[211608]: path checkers start up
Jan 22 13:53:31 compute-2 systemd[1]: Started Device-Mapper Multipath Device Controller.

○ netns-placeholder.service - Create netns directory
     Loaded: loaded (/etc/systemd/system/netns-placeholder.service; enabled; preset: enabled)
     Active: inactive (dead) since Thu 2026-01-22 13:46:17 UTC; 1h 59min ago
   Main PID: 140559 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Jan 22 13:46:17 compute-2 systemd[1]: Starting Create netns directory...
Jan 22 13:46:17 compute-2 systemd[1]: netns-placeholder.service: Deactivated successfully.
Jan 22 13:46:17 compute-2 systemd[1]: Finished Create netns directory.

○ network.service - LSB: Bring up/down networking
     Loaded: loaded (/etc/rc.d/init.d/network; generated)
     Active: inactive (dead)
       Docs: man:systemd-sysv-generator(8)

● NetworkManager-wait-online.service - Network Manager Wait Online
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager-wait-online.service; enabled; preset: disabled)
     Active: active (exited) since Thu 2026-01-22 13:26:53 UTC; 2h 19min ago
       Docs: man:NetworkManager-wait-online.service(8)
   Main PID: 49012 (code=exited, status=0/SUCCESS)
        CPU: 28ms

Jan 22 13:26:53 compute-2 systemd[1]: Starting Network Manager Wait Online...
Jan 22 13:26:53 compute-2 systemd[1]: Finished Network Manager Wait Online.

● NetworkManager.service - Network Manager
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager.service; enabled; preset: enabled)
    Drop-In: /usr/lib/systemd/system/NetworkManager.service.d
             └─NetworkManager-ovs.conf
     Active: active (running) since Thu 2026-01-22 13:26:53 UTC; 2h 19min ago
       Docs: man:NetworkManager(8)
   Main PID: 49000 (NetworkManager)
         IO: 104.0K read, 339.5K written
      Tasks: 3 (limit: 48560)
     Memory: 5.6M (peak: 6.5M)
        CPU: 1min 945ms
     CGroup: /system.slice/NetworkManager.service
             └─49000 /usr/sbin/NetworkManager --no-daemon

Jan 22 14:34:19 compute-2 NetworkManager[49000]: <info>  [1769092459.5810] manager: (patch-provnet-2aab3bd6-35b9-42c5-a14a-a2deb89cba28-to-br-int): new Open vSwitch Interface device (/org/freedesktop/NetworkManager/Devices/38)
Jan 22 14:34:19 compute-2 NetworkManager[49000]: <info>  [1769092459.5820] device (patch-provnet-2aab3bd6-35b9-42c5-a14a-a2deb89cba28-to-br-int)[Open vSwitch Interface]: state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external')
Jan 22 14:34:19 compute-2 NetworkManager[49000]: <warn>  [1769092459.5822] device (patch-provnet-2aab3bd6-35b9-42c5-a14a-a2deb89cba28-to-br-int)[Open vSwitch Interface]: error setting IPv4 forwarding to '1': No such file or directory
Jan 22 14:34:19 compute-2 NetworkManager[49000]: <info>  [1769092459.5840] manager: (patch-br-int-to-provnet-2aab3bd6-35b9-42c5-a14a-a2deb89cba28): new Open vSwitch Interface device (/org/freedesktop/NetworkManager/Devices/39)
Jan 22 14:34:19 compute-2 NetworkManager[49000]: <info>  [1769092459.5849] device (patch-br-int-to-provnet-2aab3bd6-35b9-42c5-a14a-a2deb89cba28)[Open vSwitch Interface]: state change: unmanaged -> unavailable (reason 'managed', managed-type: 'external')
Jan 22 14:34:19 compute-2 NetworkManager[49000]: <warn>  [1769092459.5850] device (patch-br-int-to-provnet-2aab3bd6-35b9-42c5-a14a-a2deb89cba28)[Open vSwitch Interface]: error setting IPv4 forwarding to '1': No such file or directory
Unit ntpd.service could not be found.
Unit ntpdate.service could not be found.
Jan 22 14:34:19 compute-2 NetworkManager[49000]: <info>  [1769092459.5869] manager: (patch-br-int-to-provnet-2aab3bd6-35b9-42c5-a14a-a2deb89cba28): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/40)
Jan 22 14:34:19 compute-2 NetworkManager[49000]: <info>  [1769092459.5883] manager: (patch-provnet-2aab3bd6-35b9-42c5-a14a-a2deb89cba28-to-br-int): new Open vSwitch Port device (/org/freedesktop/NetworkManager/Devices/41)
Jan 22 14:34:19 compute-2 NetworkManager[49000]: <info>  [1769092459.5893] device (patch-provnet-2aab3bd6-35b9-42c5-a14a-a2deb89cba28-to-br-int)[Open vSwitch Interface]: state change: unavailable -> disconnected (reason 'none', managed-type: 'full')
Jan 22 14:34:19 compute-2 NetworkManager[49000]: <info>  [1769092459.5901] device (patch-br-int-to-provnet-2aab3bd6-35b9-42c5-a14a-a2deb89cba28)[Open vSwitch Interface]: state change: unavailable -> disconnected (reason 'none', managed-type: 'full')

○ nfs-idmapd.service - NFSv4 ID-name mapping service
     Loaded: loaded (/usr/lib/systemd/system/nfs-idmapd.service; static)
     Active: inactive (dead)
       Docs: man:idmapd(8)

○ nfs-mountd.service - NFS Mount Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfs-mountd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.mountd(8)

○ nfs-server.service - NFS server and services
     Loaded: loaded (/usr/lib/systemd/system/nfs-server.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:rpc.nfsd(8)
             man:exportfs(8)

○ nfs-utils.service - NFS server and client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-utils.service; static)
     Active: inactive (dead)

○ nfsdcld.service - NFSv4 Client Tracking Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfsdcld.service; static)
     Active: inactive (dead)
       Docs: man:nfsdcld(8)

● nftables.service - Netfilter Tables
     Loaded: loaded (/usr/lib/systemd/system/nftables.service; enabled; preset: disabled)
     Active: active (exited) since Thu 2026-01-22 13:29:23 UTC; 2h 16min ago
       Docs: man:nft(8)
   Main PID: 63204 (code=exited, status=0/SUCCESS)
        CPU: 16ms

Jan 22 13:29:23 compute-2 systemd[1]: Starting Netfilter Tables...
Jan 22 13:29:23 compute-2 systemd[1]: Finished Netfilter Tables.

● nis-domainname.service - Read and set NIS domainname from /etc/sysconfig/network
     Loaded: loaded (/usr/lib/systemd/system/nis-domainname.service; enabled; preset: enabled)
     Active: active (exited) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
   Main PID: 674 (code=exited, status=0/SUCCESS)
        CPU: 4ms

Jan 22 12:49:14 localhost systemd[1]: Finished Read and set NIS domainname from /etc/sysconfig/network.

○ nvmefc-boot-connections.service - Auto-connect to subsystems on FC-NVME devices found during boot
     Loaded: loaded (/usr/lib/systemd/system/nvmefc-boot-connections.service; enabled; preset: enabled)
     Active: inactive (dead)

● openvswitch.service - Open vSwitch
     Loaded: loaded (/usr/lib/systemd/system/openvswitch.service; enabled; preset: disabled)
     Active: active (exited) since Thu 2026-01-22 13:26:34 UTC; 2h 19min ago
   Main PID: 47304 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Jan 22 13:26:34 compute-2 systemd[1]: Starting Open vSwitch...
Jan 22 13:26:34 compute-2 systemd[1]: Finished Open vSwitch.

● ovs-delete-transient-ports.service - Open vSwitch Delete Transient Ports
     Loaded: loaded (/usr/lib/systemd/system/ovs-delete-transient-ports.service; static)
     Active: active (exited) since Thu 2026-01-22 13:26:34 UTC; 2h 19min ago
   Main PID: 47242 (code=exited, status=0/SUCCESS)
        CPU: 38ms

Jan 22 13:26:34 compute-2 systemd[1]: Starting Open vSwitch Delete Transient Ports...
Jan 22 13:26:34 compute-2 systemd[1]: Finished Open vSwitch Delete Transient Ports.

● ovs-vswitchd.service - Open vSwitch Forwarding Unit
     Loaded: loaded (/usr/lib/systemd/system/ovs-vswitchd.service; static)
     Active: active (running) since Thu 2026-01-22 13:26:34 UTC; 2h 19min ago
   Main PID: 47295 (ovs-vswitchd)
         IO: 3.4M read, 128.0K written
      Tasks: 13 (limit: 48560)
     Memory: 246.4M (peak: 248.8M)
        CPU: 26.095s
     CGroup: /system.slice/ovs-vswitchd.service
              └─47295 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
Unit pacemaker.service could not be found.
Unit plymouth-quit-wait.service could not be found.
Unit plymouth-start.service could not be found.

Jan 22 13:26:34 compute-2 systemd[1]: Starting Open vSwitch Forwarding Unit...
Jan 22 13:26:34 compute-2 ovs-ctl[47285]: Inserting openvswitch module [  OK  ]
Jan 22 13:26:34 compute-2 ovs-ctl[47254]: Starting ovs-vswitchd [  OK  ]
Jan 22 13:26:34 compute-2 ovs-vsctl[47303]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-2
Jan 22 13:26:34 compute-2 ovs-ctl[47254]: Enabling remote OVSDB managers [  OK  ]
Jan 22 13:26:34 compute-2 systemd[1]: Started Open vSwitch Forwarding Unit.

● ovsdb-server.service - Open vSwitch Database Unit
     Loaded: loaded (/usr/lib/systemd/system/ovsdb-server.service; static)
     Active: active (running) since Thu 2026-01-22 13:26:34 UTC; 2h 19min ago
   Main PID: 47215 (ovsdb-server)
         IO: 1.2M read, 502.0K written
      Tasks: 1 (limit: 48560)
     Memory: 4.8M (peak: 38.1M)
        CPU: 37.186s
     CGroup: /system.slice/ovsdb-server.service
             └─47215 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach

Jan 22 13:26:34 compute-2 ovs-ctl[47167]: /etc/openvswitch/conf.db does not exist ... (warning).
Jan 22 13:26:34 compute-2 ovs-ctl[47167]: Creating empty database /etc/openvswitch/conf.db [  OK  ]
Jan 22 13:26:34 compute-2 ovs-ctl[47167]: Starting ovsdb-server [  OK  ]
Jan 22 13:26:34 compute-2 ovs-vsctl[47216]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait -- init -- set Open_vSwitch . db-version=8.5.1
Jan 22 13:26:34 compute-2 ovs-vsctl[47232]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait set Open_vSwitch . ovs-version=3.3.5-115.el9s "external-ids:system-id=\"c4fa18b6-ed0f-47ac-8eec-d1399749aa8e\"" "external-ids:rundir=\"/var/run/openvswitch\"" "system-type=\"centos\"" "system-version=\"9\""
Jan 22 13:26:34 compute-2 ovs-ctl[47167]: Configuring Open vSwitch system IDs [  OK  ]
Jan 22 13:26:34 compute-2 ovs-ctl[47167]: Enabling remote OVSDB managers [  OK  ]
Jan 22 13:26:34 compute-2 systemd[1]: Started Open vSwitch Database Unit.
Jan 22 13:26:34 compute-2 ovs-vsctl[47241]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-2
Jan 22 14:34:43 compute-2 ovsdb-server[47215]: ovs|00005|reconnect|ERR|tcp:127.0.0.1:40134: no response to inactivity probe after 5 seconds, disconnecting

● polkit.service - Authorization Manager
     Loaded: loaded (/usr/lib/systemd/system/polkit.service; static)
     Active: active (running) since Thu 2026-01-22 13:25:43 UTC; 2h 20min ago
       Docs: man:polkit(8)
   Main PID: 43481 (polkitd)
         IO: 11.3M read, 0B written
      Tasks: 12 (limit: 48560)
     Memory: 17.1M (peak: 17.7M)
        CPU: 1.667s
     CGroup: /system.slice/polkit.service
             └─43481 /usr/lib/polkit-1/polkitd --no-debug

Jan 22 13:49:24 compute-2 polkitd[43481]: Collecting garbage unconditionally...
Jan 22 13:49:24 compute-2 polkitd[43481]: Loading rules from directory /etc/polkit-1/rules.d
Jan 22 13:49:24 compute-2 polkitd[43481]: Loading rules from directory /usr/share/polkit-1/rules.d
Jan 22 13:49:24 compute-2 polkitd[43481]: Finished loading, compiling and executing 3 rules
Jan 22 13:51:32 compute-2 polkitd[43481]: Registered Authentication Agent for unix-process:196244:374326 (system bus name :1.1902 [pkttyagent --process 196244 --notify-fd 5 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Jan 22 13:51:32 compute-2 polkitd[43481]: Unregistered Authentication Agent for unix-process:196244:374326 (system bus name :1.1902, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Unit power-profiles-daemon.service could not be found.
Unit rpc-svcgssd.service could not be found.
Jan 22 13:51:32 compute-2 polkitd[43481]: Registered Authentication Agent for unix-process:196243:374326 (system bus name :1.1903 [pkttyagent --process 196243 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Jan 22 13:51:32 compute-2 polkitd[43481]: Unregistered Authentication Agent for unix-process:196243:374326 (system bus name :1.1903, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)
Jan 22 13:51:34 compute-2 polkitd[43481]: Registered Authentication Agent for unix-process:196738:374563 (system bus name :1.1906 [pkttyagent --process 196738 --notify-fd 4 --fallback], object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8)
Jan 22 13:51:34 compute-2 polkitd[43481]: Unregistered Authentication Agent for unix-process:196738:374563 (system bus name :1.1906, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale en_US.UTF-8) (disconnected from bus)

○ raid-check.service - RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.service; static)
     Active: inactive (dead)
TriggeredBy: ○ raid-check.timer

○ rbdmap.service - Map RBD devices
     Loaded: loaded (/usr/lib/systemd/system/rbdmap.service; disabled; preset: disabled)
     Active: inactive (dead)

○ rc-local.service - /etc/rc.d/rc.local Compatibility
     Loaded: loaded (/usr/lib/systemd/system/rc-local.service; static)
     Active: inactive (dead)
       Docs: man:systemd-rc-local-generator(8)

○ rescue.service - Rescue Shell
     Loaded: loaded (/usr/lib/systemd/system/rescue.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

○ rpc-gssd.service - RPC security service for NFS client and server
     Loaded: loaded (/usr/lib/systemd/system/rpc-gssd.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Thu 2026-01-22 12:49:16 UTC; 2h 56min ago
       Docs: man:rpc.gssd(8)

Jan 22 12:49:16 np0005592159.novalocal systemd[1]: RPC security service for NFS client and server was skipped because of an unmet condition check (ConditionPathExists=/etc/krb5.keytab).

● rpc-statd-notify.service - Notify NFS peers of a restart
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd-notify.service; static)
     Active: active (exited) since Thu 2026-01-22 12:49:18 UTC; 2h 56min ago
       Docs: man:sm-notify(8)
             man:rpc.statd(8)
        CPU: 7ms

Jan 22 12:49:18 np0005592159.novalocal systemd[1]: Starting Notify NFS peers of a restart...
Jan 22 12:49:18 np0005592159.novalocal sm-notify[1001]: Version 2.5.4 starting
Jan 22 12:49:18 np0005592159.novalocal systemd[1]: Started Notify NFS peers of a restart.

○ rpc-statd.service - NFS status monitor for NFSv2/3 locking.
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.statd(8)

● rpcbind.service - RPC Bind
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.service; enabled; preset: enabled)
     Active: active (running) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
TriggeredBy: ● rpcbind.socket
       Docs: man:rpcbind(8)
   Main PID: 697 (rpcbind)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 2.3M (peak: 2.6M)
        CPU: 39ms
     CGroup: /system.slice/rpcbind.service
             └─697 /usr/bin/rpcbind -w -f

Jan 22 12:49:14 localhost systemd[1]: Starting RPC Bind...
Jan 22 12:49:14 localhost systemd[1]: Started RPC Bind.

● rsyslog.service - System Logging Service
     Loaded: loaded (/usr/lib/systemd/system/rsyslog.service; enabled; preset: enabled)
     Active: active (running) since Thu 2026-01-22 12:49:18 UTC; 2h 56min ago
       Docs: man:rsyslogd(8)
             https://www.rsyslog.com/doc/
   Main PID: 1002 (rsyslogd)
         IO: 4.0K read, 22.1M written
      Tasks: 3 (limit: 48560)
     Memory: 24.4M (peak: 24.9M)
         CPU: 19.330s
Unit sntp.service could not be found.
Unit sshd-keygen.service could not be found.
     CGroup: /system.slice/rsyslog.service
             └─1002 /usr/sbin/rsyslogd -n

Jan 22 14:22:00 compute-2 rsyslogd[1002]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 22 14:22:00 compute-2 rsyslogd[1002]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 22 14:38:54 compute-2 rsyslogd[1002]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 22 14:38:54 compute-2 rsyslogd[1002]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 22 15:00:27 compute-2 rsyslogd[1002]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 22 15:00:27 compute-2 rsyslogd[1002]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 22 15:22:20 compute-2 rsyslogd[1002]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 22 15:22:20 compute-2 rsyslogd[1002]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 22 15:43:58 compute-2 rsyslogd[1002]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Jan 22 15:43:58 compute-2 rsyslogd[1002]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]

○ selinux-autorelabel-mark.service - Mark the need to relabel after reboot
     Loaded: loaded (/usr/lib/systemd/system/selinux-autorelabel-mark.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Thu 2026-01-22 12:49:14 UTC; 2h 56min ago

Jan 22 12:49:14 localhost systemd[1]: Mark the need to relabel after reboot was skipped because of an unmet condition check (ConditionSecurity=!selinux).

● serial-getty@ttyS0.service - Serial Getty on ttyS0
     Loaded: loaded (/usr/lib/systemd/system/serial-getty@.service; enabled-runtime; preset: disabled)
     Active: active (running) since Thu 2026-01-22 12:49:18 UTC; 2h 56min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1009 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 232.0K (peak: 488.0K)
        CPU: 12ms
     CGroup: /system.slice/system-serial\x2dgetty.slice/serial-getty@ttyS0.service
             └─1009 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

Jan 22 12:49:18 np0005592159.novalocal systemd[1]: Started Serial Getty on ttyS0.

○ sshd-keygen@ecdsa.service - OpenSSH ecdsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Thu 2026-01-22 13:49:31 UTC; 1h 56min ago

Jan 22 12:49:15 localhost systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Jan 22 13:49:31 compute-2 systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@ed25519.service - OpenSSH ed25519 Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Thu 2026-01-22 13:49:31 UTC; 1h 56min ago

Jan 22 12:49:15 localhost systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Unit syslog.service could not be found.
Jan 22 13:49:31 compute-2 systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@rsa.service - OpenSSH rsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Thu 2026-01-22 13:49:31 UTC; 1h 56min ago

Jan 22 12:49:15 localhost systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).
Jan 22 13:49:31 compute-2 systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

● sshd.service - OpenSSH server daemon
     Loaded: loaded (/usr/lib/systemd/system/sshd.service; enabled; preset: enabled)
     Active: active (running) since Thu 2026-01-22 13:49:31 UTC; 1h 56min ago
       Docs: man:sshd(8)
             man:sshd_config(5)
   Main PID: 169467 (sshd)
         IO: 68.0K read, 152.0K written
      Tasks: 1 (limit: 48560)
     Memory: 3.5M (peak: 7.4M)
        CPU: 2.829s
     CGroup: /system.slice/sshd.service
             └─169467 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"

Jan 22 15:33:13 compute-2 sshd[169467]: drop connection #0 from [134.209.61.246]:51132 on [38.102.83.5]:22 penalty: failed authentication
Jan 22 15:33:13 compute-2 sshd[169467]: drop connection #0 from [134.209.61.246]:51138 on [38.102.83.5]:22 penalty: failed authentication
Jan 22 15:33:13 compute-2 sshd[169467]: drop connection #0 from [134.209.61.246]:51146 on [38.102.83.5]:22 penalty: failed authentication
Jan 22 15:33:14 compute-2 sshd[169467]: drop connection #0 from [134.209.61.246]:51148 on [38.102.83.5]:22 penalty: failed authentication
Jan 22 15:33:14 compute-2 sshd[169467]: drop connection #0 from [134.209.61.246]:51162 on [38.102.83.5]:22 penalty: failed authentication
Jan 22 15:33:14 compute-2 sshd[169467]: drop connection #0 from [134.209.61.246]:51178 on [38.102.83.5]:22 penalty: failed authentication
Jan 22 15:38:41 compute-2 sshd-session[286669]: error: kex_exchange_identification: read: Connection reset by peer
Jan 22 15:38:41 compute-2 sshd-session[286669]: Connection reset by 176.120.22.52 port 60390
Jan 22 15:45:29 compute-2 sshd-session[291623]: Accepted publickey for zuul from 192.168.122.10 port 38638 ssh2: ECDSA SHA256:ZGulYWguNMmFf6ciBfmyHwkPUuqxgPGYTHWq2rryzeI
Jan 22 15:45:29 compute-2 sshd-session[291623]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)

○ sssd-kcm.service - SSSD Kerberos Cache Manager
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.service; indirect; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ● sssd-kcm.socket
       Docs: man:sssd-kcm(5)

○ sssd.service - System Security Services Daemon
     Loaded: loaded (/usr/lib/systemd/system/sssd.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Thu 2026-01-22 12:49:15 UTC; 2h 56min ago

Jan 22 12:49:15 localhost systemd[1]: System Security Services Daemon was skipped because no trigger condition checks were met.

○ sysstat-collect.service - system activity accounting tool
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-collect.timer
       Docs: man:sa1(8)

○ sysstat-summary.service - Generate a daily summary of process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-summary.timer
       Docs: man:sa2(8)

○ sysstat.service - Resets System Activity Logs
     Loaded: loaded (/usr/lib/systemd/system/sysstat.service; enabled; preset: enabled)
     Active: inactive (dead)

○ systemd-ask-password-console.service - Dispatch Password Requests to Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-console.path
       Docs: man:systemd-ask-password-console.service(8)

○ systemd-ask-password-wall.service - Forward Password Requests to Wall
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-wall.path
       Docs: man:systemd-ask-password-wall.service(8)

○ systemd-binfmt.service - Set Up Additional Binary Formats
     Loaded: loaded (/usr/lib/systemd/system/systemd-binfmt.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd-binfmt.service(8)
             man:binfmt.d(5)
             https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Jan 22 12:49:14 localhost systemd[1]: Set Up Additional Binary Formats was skipped because no trigger condition checks were met.

○ systemd-boot-random-seed.service - Update Boot Loader Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-random-seed.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd-boot-random-seed.service(8)
             man:random(4)

Jan 22 12:49:14 localhost systemd[1]: Update Boot Loader Random Seed was skipped because no trigger condition checks were met.

● systemd-boot-update.service - Automatic Boot Loader Update
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-update.service; enabled; preset: enabled)
     Active: active (exited) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:bootctl(1)
   Main PID: 693 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Jan 22 12:49:14 localhost systemd[1]: Starting Automatic Boot Loader Update...
Jan 22 12:49:14 localhost bootctl[693]: Couldn't find EFI system partition, skipping.
Jan 22 12:49:14 localhost systemd[1]: Finished Automatic Boot Loader Update.

○ systemd-firstboot.service - First Boot Wizard
     Loaded: loaded (/usr/lib/systemd/system/systemd-firstboot.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd-firstboot(1)

Jan 22 12:49:14 localhost systemd[1]: First Boot Wizard was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ systemd-fsck-root.service - File System Check on Root Device
     Loaded: loaded (/usr/lib/systemd/system/systemd-fsck-root.service; static)
     Active: inactive (dead) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
   Duration: 1.413s
       Docs: man:systemd-fsck-root.service(8)
   Main PID: 552 (code=exited, status=0/SUCCESS)
        CPU: 20ms

Jan 22 12:49:12 localhost systemd[1]: Starting File System Check on /dev/disk/by-uuid/22ac9141-3960-4912-b20e-19fc8a328d40...
Jan 22 12:49:12 localhost systemd-fsck[554]: /usr/sbin/fsck.xfs: XFS file system.
Jan 22 12:49:12 localhost systemd[1]: Finished File System Check on /dev/disk/by-uuid/22ac9141-3960-4912-b20e-19fc8a328d40.

● systemd-hostnamed.service - Hostname Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-hostnamed.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-hostnamed.service.d
             └─disable-privatedevices.conf
     Active: active (running) since Thu 2026-01-22 15:45:49 UTC; 23s ago
       Docs: man:systemd-hostnamed.service(8)
             man:hostname(5)
             man:machine-info(5)
             man:org.freedesktop.resolve1(5)
   Main PID: 294012 (systemd-hostnam)
         IO: 8.0K read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 2.7M (peak: 3.8M)
        CPU: 149ms
     CGroup: /system.slice/systemd-hostnamed.service
             └─294012 /usr/lib/systemd/systemd-hostnamed

Jan 22 15:45:49 compute-2 systemd[1]: Starting Hostname Service...
Jan 22 15:45:49 compute-2 systemd[1]: Started Hostname Service.

● systemd-hwdb-update.service - Rebuild Hardware Database
     Loaded: loaded (/usr/lib/systemd/system/systemd-hwdb-update.service; static)
     Active: active (exited) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
       Docs: man:hwdb(7)
             man:systemd-hwdb(8)
   Main PID: 685 (code=exited, status=0/SUCCESS)
        CPU: 517ms

Jan 22 12:49:14 localhost systemd[1]: Starting Rebuild Hardware Database...
Jan 22 12:49:15 localhost systemd[1]: Finished Rebuild Hardware Database.

○ systemd-initctl.service - initctl Compatibility Daemon
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-initctl.socket
       Docs: man:systemd-initctl.service(8)

● systemd-journal-catalog-update.service - Rebuild Journal Catalog
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-catalog-update.service; static)
     Active: active (exited) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 698 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Jan 22 12:49:14 localhost systemd[1]: Starting Rebuild Journal Catalog...
Jan 22 12:49:14 localhost systemd[1]: Finished Rebuild Journal Catalog.

● systemd-journal-flush.service - Flush Journal to Persistent Storage
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-flush.service; static)
     Active: active (exited) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 686 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Jan 22 12:49:14 localhost systemd[1]: Starting Flush Journal to Persistent Storage...
Jan 22 12:49:14 localhost systemd[1]: Finished Flush Journal to Persistent Storage.

● systemd-journald.service - Journal Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.service; static)
     Active: active (running) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
TriggeredBy: ● systemd-journald.socket
             ● systemd-journald-dev-log.socket
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 675 (systemd-journal)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 96.5M (peak: 96.8M)
        CPU: 23.912s
     CGroup: /system.slice/systemd-journald.service
             └─675 /usr/lib/systemd/systemd-journald

Jan 22 12:49:14 localhost systemd-journald[675]: Journal started
Jan 22 12:49:14 localhost systemd-journald[675]: Runtime Journal (/run/log/journal/85ac68c10a6e7ae08ceb898dbdca0cb5) is 8.0M, max 153.6M, 145.6M free.
Jan 22 12:49:14 localhost systemd[1]: systemd-journald.service: Deactivated successfully.
Jan 22 12:49:14 localhost systemd-journald[675]: Runtime Journal (/run/log/journal/85ac68c10a6e7ae08ceb898dbdca0cb5) is 8.0M, max 153.6M, 145.6M free.
Jan 22 12:49:14 localhost systemd-journald[675]: Received client request to flush runtime journal.

● systemd-logind.service - User Login Management
     Loaded: loaded (/usr/lib/systemd/system/systemd-logind.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-logind.service.d
             └─10-grub2-logind-service.conf
     Active: active (running) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
       Docs: man:sd-login(3)
             man:systemd-logind.service(8)
             man:logind.conf(5)
             man:org.freedesktop.login1(5)
   Main PID: 787 (systemd-logind)
     Status: "Processing requests..."
         IO: 4.0K read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 6.5M (peak: 7.4M)
        CPU: 3.445s
     CGroup: /system.slice/systemd-logind.service
             └─787 /usr/lib/systemd/systemd-logind
Unit systemd-networkd-wait-online.service could not be found.

Jan 22 13:52:09 compute-2 systemd-logind[787]: Removed session 48.
Jan 22 13:52:15 compute-2 systemd-logind[787]: New session 49 of user zuul.
Jan 22 13:53:27 compute-2 systemd-logind[787]: Watching system buttons on /dev/input/event0 (Power Button)
Jan 22 13:53:27 compute-2 systemd-logind[787]: Watching system buttons on /dev/input/event1 (AT Translated Set 2 keyboard)
Jan 22 13:54:31 compute-2 systemd-logind[787]: New session 50 of user zuul.
Jan 22 13:54:31 compute-2 systemd-logind[787]: Session 50 logged out. Waiting for processes to exit.
Jan 22 13:54:31 compute-2 systemd-logind[787]: Removed session 50.
Jan 22 13:55:25 compute-2 systemd-logind[787]: Session 49 logged out. Waiting for processes to exit.
Jan 22 13:55:25 compute-2 systemd-logind[787]: Removed session 49.
Jan 22 15:45:29 compute-2 systemd-logind[787]: New session 51 of user zuul.

○ systemd-machine-id-commit.service - Commit a transient machine-id on disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-machine-id-commit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd-machine-id-commit.service(8)

Jan 22 12:49:14 localhost systemd[1]: Commit a transient machine-id on disk was skipped because of an unmet condition check (ConditionPathIsMountPoint=/etc/machine-id).

● systemd-machined.service - Virtual Machine and Container Registration Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-machined.service; static)
     Active: active (running) since Thu 2026-01-22 13:51:25 UTC; 1h 54min ago
       Docs: man:systemd-machined.service(8)
             man:org.freedesktop.machine1(5)
   Main PID: 194970 (systemd-machine)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48560)
     Memory: 1.5M (peak: 2.0M)
        CPU: 1.711s
     CGroup: /system.slice/systemd-machined.service
             └─194970 /usr/lib/systemd/systemd-machined

Jan 22 14:11:09 compute-2 systemd-machined[194970]: Machine qemu-1-instance-00000006 terminated.
Jan 22 14:21:53 compute-2 systemd-machined[194970]: New machine qemu-3-instance-0000000d.
Jan 22 14:28:42 compute-2 systemd-machined[194970]: New machine qemu-4-instance-00000012.
Jan 22 14:29:03 compute-2 systemd-machined[194970]: Machine qemu-4-instance-00000012 terminated.
Jan 22 14:32:31 compute-2 systemd-machined[194970]: New machine qemu-5-instance-00000015.
Jan 22 14:33:04 compute-2 systemd-machined[194970]: Machine qemu-3-instance-0000000d terminated.
Jan 22 14:33:51 compute-2 systemd-machined[194970]: Machine qemu-5-instance-00000015 terminated.
Jan 22 14:34:15 compute-2 systemd-machined[194970]: New machine qemu-6-instance-00000016.
Jan 22 14:34:20 compute-2 systemd-machined[194970]: New machine qemu-7-instance-00000015.
Jan 22 14:34:22 compute-2 systemd-machined[194970]: Machine qemu-7-instance-00000015 terminated.

● systemd-modules-load.service - Load Kernel Modules
     Loaded: loaded (/usr/lib/systemd/system/systemd-modules-load.service; static)
     Active: active (exited) since Thu 2026-01-22 13:53:23 UTC; 1h 52min ago
       Docs: man:systemd-modules-load.service(8)
             man:modules-load.d(5)
   Main PID: 209770 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Jan 22 13:53:23 compute-2 systemd[1]: Starting Load Kernel Modules...
Jan 22 13:53:23 compute-2 systemd[1]: Finished Load Kernel Modules.

● systemd-network-generator.service - Generate network units from Kernel command line
     Loaded: loaded (/usr/lib/systemd/system/systemd-network-generator.service; enabled; preset: enabled)
     Active: active (exited) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd-network-generator.service(8)
   Main PID: 676 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Jan 22 12:49:14 localhost systemd[1]: Finished Generate network units from Kernel command line.

○ systemd-pcrmachine.service - TPM2 PCR Machine ID Measurement
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrmachine.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd-pcrmachine.service(8)

○ systemd-pcrphase-initrd.service - TPM2 PCR Barrier (initrd)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-initrd.service; static)
     Active: inactive (dead)
       Docs: man:systemd-pcrphase-initrd.service(8)

○ systemd-pcrphase-sysinit.service - TPM2 PCR Barrier (Initialization)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-sysinit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
       Docs: man:systemd-pcrphase-sysinit.service(8)

Jan 22 12:49:15 localhost systemd[1]: TPM2 PCR Barrier (Initialization) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pcrphase.service - TPM2 PCR Barrier (User)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Thu 2026-01-22 12:49:16 UTC; 2h 56min ago
       Docs: man:systemd-pcrphase.service(8)

Jan 22 12:49:16 np0005592159.novalocal systemd[1]: TPM2 PCR Barrier (User) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pstore.service - Platform Persistent Storage Archival
     Loaded: loaded (/usr/lib/systemd/system/systemd-pstore.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd-pstore(8)

Jan 22 12:49:14 localhost systemd[1]: Platform Persistent Storage Archival was skipped because of an unmet condition check (ConditionDirectoryNotEmpty=/sys/fs/pstore).

● systemd-random-seed.service - Load/Save OS Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-random-seed.service; static)
     Active: active (exited) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd-random-seed.service(8)
             man:random(4)
   Main PID: 687 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Jan 22 12:49:14 localhost systemd[1]: Starting Load/Save OS Random Seed...
Jan 22 12:49:14 localhost systemd[1]: Finished Load/Save OS Random Seed.

● systemd-remount-fs.service - Remount Root and Kernel File Systems
     Loaded: loaded (/usr/lib/systemd/system/systemd-remount-fs.service; enabled-runtime; preset: disabled)
     Active: active (exited) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd-remount-fs.service(8)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
   Main PID: 677 (code=exited, status=0/SUCCESS)
        CPU: 15ms

Jan 22 12:49:14 localhost systemd[1]: Finished Remount Root and Kernel File Systems.

○ systemd-repart.service - Repartition Root Disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-repart.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd-repart.service(8)

○ systemd-rfkill.service - Load/Save RF Kill Switch Status
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-rfkill.socket
       Docs: man:systemd-rfkill.service(8)

● systemd-sysctl.service - Apply Kernel Variables
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysctl.service; static)
     Active: active (exited) since Thu 2026-01-22 13:25:58 UTC; 2h 20min ago
       Docs: man:systemd-sysctl.service(8)
             man:sysctl.d(5)
   Main PID: 44968 (code=exited, status=0/SUCCESS)
        CPU: 25ms

Jan 22 13:25:58 compute-2 systemd[1]: Starting Apply Kernel Variables...
Jan 22 13:25:58 compute-2 systemd[1]: Finished Apply Kernel Variables.

○ systemd-sysext.service - Merge System Extension Images into /usr/ and /opt/
Unit systemd-timesyncd.service could not be found.
Unit systemd-tmpfiles.service could not be found.
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysext.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd-sysext.service(8)

● systemd-sysusers.service - Create System Users
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysusers.service; static)
     Active: active (exited) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:sysusers.d(5)
             man:systemd-sysusers.service(8)
   Main PID: 688 (code=exited, status=0/SUCCESS)
        CPU: 30ms

Jan 22 12:49:14 localhost systemd[1]: Starting Create System Users...
Jan 22 12:49:14 localhost systemd[1]: Finished Create System Users.

○ systemd-tmpfiles-clean.service - Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.service; static)
     Active: inactive (dead) since Thu 2026-01-22 13:04:26 UTC; 2h 41min ago
TriggeredBy: ● systemd-tmpfiles-clean.timer
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 29958 (code=exited, status=0/SUCCESS)
        CPU: 67ms

Jan 22 13:04:25 compute-2 systemd[1]: Starting Cleanup of Temporary Directories...
Jan 22 13:04:26 compute-2 systemd[1]: systemd-tmpfiles-clean.service: Deactivated successfully.
Jan 22 13:04:26 compute-2 systemd[1]: Finished Cleanup of Temporary Directories.

● systemd-tmpfiles-setup-dev.service - Create Static Device Nodes in /dev
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup-dev.service; static)
     Active: active (exited) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 690 (code=exited, status=0/SUCCESS)
        CPU: 42ms

Jan 22 12:49:14 localhost systemd[1]: Starting Create Static Device Nodes in /dev...
Jan 22 12:49:14 localhost systemd[1]: Finished Create Static Device Nodes in /dev.

● systemd-tmpfiles-setup.service - Create Volatile Files and Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup.service; static)
     Active: active (exited) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 694 (code=exited, status=0/SUCCESS)
        CPU: 112ms

Jan 22 12:49:14 localhost systemd[1]: Starting Create Volatile Files and Directories...
Jan 22 12:49:14 localhost systemd[1]: Finished Create Volatile Files and Directories.

● systemd-udev-settle.service - Wait for udev To Complete Device Initialization
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-settle.service; static)
     Active: active (exited) since Thu 2026-01-22 13:53:17 UTC; 1h 52min ago
       Docs: man:systemd-udev-settle.service(8)
   Main PID: 208791 (code=exited, status=0/SUCCESS)
        CPU: 19ms

Jan 22 13:53:17 compute-2 systemd[1]: Starting Wait for udev To Complete Device Initialization...
Jan 22 13:53:17 compute-2 udevadm[208791]: systemd-udev-settle.service is deprecated. Please fix multipathd.service not to pull it in.
Jan 22 13:53:17 compute-2 systemd[1]: Finished Wait for udev To Complete Device Initialization.

● systemd-udev-trigger.service - Coldplug All udev Devices
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-trigger.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-udev-trigger.service.d
             └─systemd-udev-trigger-no-reload.conf
     Active: active (exited) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:udev(7)
             man:systemd-udevd.service(8)
   Main PID: 681 (code=exited, status=0/SUCCESS)
        CPU: 90ms

Jan 22 12:49:14 localhost systemd[1]: Finished Coldplug All udev Devices.

● systemd-udevd.service - Rule-based Manager for Device Events and Files
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd.service; static)
     Active: active (running) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
TriggeredBy: ● systemd-udevd-kernel.socket
             ● systemd-udevd-control.socket
       Docs: man:systemd-udevd.service(8)
             man:udev(7)
   Main PID: 727 (systemd-udevd)
     Status: "Processing with 32 children at max"
         IO: 46.4M read, 29.3M written
      Tasks: 1
     Memory: 31.2M (peak: 90.4M)
        CPU: 5.332s
     CGroup: /system.slice/systemd-udevd.service
             └─udev
               └─727 /usr/lib/systemd/systemd-udevd

Jan 22 13:53:27 compute-2 lvm[210086]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Jan 22 13:53:27 compute-2 lvm[210086]: VG ceph_vg0 finished
Jan 22 14:10:53 compute-2 systemd-udevd[237607]: Network interface NamePolicy= disabled on kernel command line.
Jan 22 14:10:54 compute-2 systemd-udevd[237605]: Network interface NamePolicy= disabled on kernel command line.
Jan 22 14:10:57 compute-2 systemd-udevd[237801]: Network interface NamePolicy= disabled on kernel command line.
Jan 22 14:28:42 compute-2 systemd-udevd[248612]: Network interface NamePolicy= disabled on kernel command line.
Jan 22 14:28:45 compute-2 systemd-udevd[248680]: Network interface NamePolicy= disabled on kernel command line.
Jan 22 14:34:15 compute-2 systemd-udevd[252494]: Network interface NamePolicy= disabled on kernel command line.
Jan 22 15:45:38 compute-2 lvm[292291]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Jan 22 15:45:38 compute-2 lvm[292291]: VG ceph_vg0 finished

● systemd-update-done.service - Update is Completed
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-done.service; static)
     Active: active (exited) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
       Docs: man:systemd-update-done.service(8)
   Main PID: 728 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Jan 22 12:49:15 localhost systemd[1]: Starting Update is Completed...
Jan 22 12:49:15 localhost systemd[1]: Finished Update is Completed.

○ systemd-update-utmp-runlevel.service - Record Runlevel Change in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp-runlevel.service; static)
     Active: inactive (dead) since Thu 2026-01-22 12:49:18 UTC; 2h 56min ago
       Docs: man:systemd-update-utmp-runlevel.service(8)
             man:utmp(5)
   Main PID: 1014 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Jan 22 12:49:18 np0005592159.novalocal systemd[1]: Starting Record Runlevel Change in UTMP...
Jan 22 12:49:18 np0005592159.novalocal systemd[1]: systemd-update-utmp-runlevel.service: Deactivated successfully.
Jan 22 12:49:18 np0005592159.novalocal systemd[1]: Finished Record Runlevel Change in UTMP.

● systemd-update-utmp.service - Record System Boot/Shutdown in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp.service; static)
     Active: active (exited) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd-update-utmp.service(8)
             man:utmp(5)
   Main PID: 726 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Jan 22 12:49:14 localhost systemd[1]: Starting Record System Boot/Shutdown in UTMP...
Jan 22 12:49:14 localhost systemd[1]: Finished Record System Boot/Shutdown in UTMP.

● systemd-user-sessions.service - Permit User Sessions
     Loaded: loaded (/usr/lib/systemd/system/systemd-user-sessions.service; static)
     Active: active (exited) since Thu 2026-01-22 12:49:18 UTC; 2h 56min ago
       Docs: man:systemd-user-sessions.service(8)
   Main PID: 1004 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Jan 22 12:49:18 np0005592159.novalocal systemd[1]: Starting Permit User Sessions...
Jan 22 12:49:18 np0005592159.novalocal systemd[1]: Finished Permit User Sessions.

○ systemd-vconsole-setup.service - Setup Virtual Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-vconsole-setup.service; static)
     Active: inactive (dead) since Thu 2026-01-22 12:49:13 UTC; 2h 56min ago
   Duration: 2.174s
       Docs: man:systemd-vconsole-setup.service(8)
             man:vconsole.conf(5)
   Main PID: 313 (code=exited, status=0/SUCCESS)
        CPU: 254ms

Jan 22 12:49:11 localhost systemd[1]: Finished Setup Virtual Console.
Jan 22 12:49:13 localhost systemd[1]: systemd-vconsole-setup.service: Deactivated successfully.
Jan 22 12:49:13 localhost systemd[1]: Stopped Setup Virtual Console.
Notice: journal has been rotated since unit was started, output may be incomplete.
Unit tlp.service could not be found.

● tuned.service - Dynamic System Tuning Daemon
     Loaded: loaded (/usr/lib/systemd/system/tuned.service; enabled; preset: enabled)
     Active: active (running) since Thu 2026-01-22 13:39:41 UTC; 2h 6min ago
       Docs: man:tuned(8)
             man:tuned.conf(5)
             man:tuned-adm(8)
   Main PID: 92450 (tuned)
         IO: 192.0K read, 0B written
      Tasks: 4 (limit: 48560)
     Memory: 14.1M (peak: 16.8M)
        CPU: 2.597s
     CGroup: /system.slice/tuned.service
             └─92450 /usr/bin/python3 -Es /usr/sbin/tuned -l -P

Jan 22 13:39:41 compute-2 systemd[1]: Starting Dynamic System Tuning Daemon...
Jan 22 13:39:41 compute-2 systemd[1]: Started Dynamic System Tuning Daemon.

○ unbound-anchor.service - update of the root trust anchor for DNSSEC validation in unbound
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.service; static)
     Active: inactive (dead)
TriggeredBy: ● unbound-anchor.timer
       Docs: man:unbound-anchor(8)

● user-runtime-dir@1000.service - User Runtime Directory /run/user/1000
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Thu 2026-01-22 12:50:30 UTC; 2h 55min ago
       Docs: man:user@.service(5)
   Main PID: 4304 (code=exited, status=0/SUCCESS)
        CPU: 28ms

Jan 22 12:50:30 np0005592159.novalocal systemd[1]: Starting User Runtime Directory /run/user/1000...
Jan 22 12:50:30 np0005592159.novalocal systemd[1]: Finished User Runtime Directory /run/user/1000.

● user-runtime-dir@42477.service - User Runtime Directory /run/user/42477
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Thu 2026-01-22 13:33:33 UTC; 2h 12min ago
       Docs: man:user@.service(5)
   Main PID: 72609 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Jan 22 13:33:33 compute-2 systemd[1]: Starting User Runtime Directory /run/user/42477...
Jan 22 13:33:33 compute-2 systemd[1]: Finished User Runtime Directory /run/user/42477.

● user@1000.service - User Manager for UID 1000
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Thu 2026-01-22 12:50:30 UTC; 2h 55min ago
       Docs: man:user@.service(5)
   Main PID: 4305 (systemd)
     Status: "Ready."
         IO: 676.0K read, 8.0K written
      Tasks: 5
     Memory: 9.0M (peak: 15.1M)
        CPU: 1.065s
     CGroup: /user.slice/user-1000.slice/user@1000.service
             ├─app.slice
             │ └─dbus-broker.service
             │   ├─11936 /usr/bin/dbus-broker-launch --scope user
             │   └─11948 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             ├─init.scope
             │ ├─4305 /usr/lib/systemd/systemd --user
             │ └─4307 "(sd-pam)"
             └─user.slice
               └─podman-pause-3b1c51bd.scope
                 └─11838 catatonit -P

Jan 22 13:01:26 np0005592159.novalocal dbus-broker-launch[11936]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +31: Eavesdropping is deprecated and ignored
Jan 22 13:01:26 np0005592159.novalocal dbus-broker-launch[11936]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +33: Eavesdropping is deprecated and ignored
Jan 22 13:01:26 np0005592159.novalocal systemd[4305]: Started D-Bus User Message Bus.
Jan 22 13:01:26 np0005592159.novalocal dbus-broker-lau[11936]: Ready
Jan 22 13:01:26 np0005592159.novalocal systemd[4305]: selinux: avc:  op=load_policy lsm=selinux seqno=4 res=1
Jan 22 13:01:26 np0005592159.novalocal systemd[4305]: Created slice Slice /user.
Jan 22 13:01:26 np0005592159.novalocal systemd[4305]: podman-11817.scope: unit configures an IP firewall, but not running as root.
Jan 22 13:01:26 np0005592159.novalocal systemd[4305]: (This warning is only shown for the first unit using IP firewalling.)
Jan 22 13:01:26 np0005592159.novalocal systemd[4305]: Started podman-11817.scope.
Jan 22 13:01:26 np0005592159.novalocal systemd[4305]: Started podman-pause-3b1c51bd.scope.

● user@42477.service - User Manager for UID 42477
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Thu 2026-01-22 13:33:33 UTC; 2h 12min ago
       Docs: man:user@.service(5)
   Main PID: 72610 (systemd)
     Status: "Ready."
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 7.1M (peak: 11.0M)
        CPU: 606ms
     CGroup: /user.slice/user-42477.slice/user@42477.service
             └─init.scope
               ├─72610 /usr/lib/systemd/systemd --user
               └─72612 "(sd-pam)"

Jan 22 13:33:33 compute-2 systemd[72610]: Reached target Sockets.
Jan 22 13:33:33 compute-2 systemd[72610]: Reached target Basic System.
Jan 22 13:33:33 compute-2 systemd[72610]: Reached target Main User Target.
Jan 22 13:33:33 compute-2 systemd[72610]: Startup finished in 111ms.
Jan 22 13:33:33 compute-2 systemd[1]: Started User Manager for UID 42477.
Jan 22 13:35:42 compute-2 systemd[72610]: Starting Mark boot as successful...
Jan 22 13:35:42 compute-2 systemd[72610]: Finished Mark boot as successful.
Jan 22 13:38:38 compute-2 systemd[72610]: Created slice User Background Tasks Slice.
Jan 22 13:38:38 compute-2 systemd[72610]: Starting Cleanup of User's Temporary Files and Directories...
Jan 22 13:38:38 compute-2 systemd[72610]: Finished Cleanup of User's Temporary Files and Directories.

○ virtinterfaced.service - libvirt interface daemon
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtinterfaced.socket
             ○ virtinterfaced-admin.socket
             ○ virtinterfaced-ro.socket
       Docs: man:virtinterfaced(8)
             https://libvirt.org/

○ virtlockd.service - libvirt locking daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtlockd-admin.socket
             ● virtlockd.socket
       Docs: man:virtlockd(8)
             https://libvirt.org/

● virtlogd.service - libvirt logging daemon
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.service; enabled; preset: disabled)
     Active: active (running) since Thu 2026-01-22 13:51:21 UTC; 1h 54min ago
TriggeredBy: ● virtlogd-admin.socket
             ● virtlogd.socket
       Docs: man:virtlogd(8)
             https://libvirt.org/
   Main PID: 194338 (virtlogd)
         IO: 644.0K read, 408.0K written
      Tasks: 1 (limit: 48560)
     Memory: 3.4M (peak: 3.7M)
        CPU: 6.744s
     CGroup: /system.slice/virtlogd.service
             └─194338 /usr/sbin/virtlogd

Jan 22 13:51:21 compute-2 systemd[1]: Starting libvirt logging daemon...
Jan 22 13:51:21 compute-2 systemd[1]: Started libvirt logging daemon.

○ virtnetworkd.service - libvirt network daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnetworkd-ro.socket
             ○ virtnetworkd.socket
             ○ virtnetworkd-admin.socket
       Docs: man:virtnetworkd(8)
             https://libvirt.org/

● virtnodedevd.service - libvirt nodedev daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.service; enabled; preset: disabled)
     Active: active (running) since Thu 2026-01-22 13:55:19 UTC; 1h 50min ago
TriggeredBy: ● virtnodedevd-ro.socket
             ● virtnodedevd-admin.socket
             ● virtnodedevd.socket
       Docs: man:virtnodedevd(8)
             https://libvirt.org/
   Main PID: 226244 (virtnodedevd)
         IO: 3.8M read, 0B written
      Tasks: 20 (limit: 48560)
     Memory: 9.4M (peak: 10.9M)
        CPU: 2.974s
     CGroup: /system.slice/virtnodedevd.service
             └─226244 /usr/sbin/virtnodedevd --timeout 120

Jan 22 13:55:19 compute-2 systemd[1]: Starting libvirt nodedev daemon...
Jan 22 13:55:19 compute-2 systemd[1]: Started libvirt nodedev daemon.

○ virtnwfilterd.service - libvirt nwfilter daemon
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtnwfilterd-ro.socket
             ○ virtnwfilterd-admin.socket
             ○ virtnwfilterd.socket
       Docs: man:virtnwfilterd(8)
             https://libvirt.org/

○ virtproxyd.service - libvirt proxy daemon
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.service; enabled; preset: disabled)
     Active: inactive (dead) since Thu 2026-01-22 14:30:43 UTC; 1h 15min ago
   Duration: 2min 1.583s
TriggeredBy: ● virtproxyd.socket
             ● virtproxyd-admin.socket
             ● virtproxyd-ro.socket
             ● virtproxyd-tls.socket
       Docs: man:virtproxyd(8)
             https://libvirt.org/
    Process: 248578 ExecStart=/usr/sbin/virtproxyd $VIRTPROXYD_ARGS (code=exited, status=0/SUCCESS)
   Main PID: 248578 (code=exited, status=0/SUCCESS)
        CPU: 60ms

Jan 22 14:28:41 compute-2 systemd[1]: Starting libvirt proxy daemon...
Jan 22 14:28:41 compute-2 systemd[1]: Started libvirt proxy daemon.
Jan 22 14:30:43 compute-2 systemd[1]: virtproxyd.service: Deactivated successfully.

● virtqemud.service - libvirt QEMU daemon
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.service; enabled; preset: enabled)
     Active: active (running) since Thu 2026-01-22 13:55:17 UTC; 1h 50min ago
TriggeredBy: ● virtqemud-admin.socket
             ● virtqemud.socket
             ● virtqemud-ro.socket
       Docs: man:virtqemud(8)
             https://libvirt.org/
   Main PID: 225907 (virtqemud)
         IO: 1.6M read, 354.0K written
      Tasks: 20 (limit: 32768)
     Memory: 21.1M (peak: 46.3M)
        CPU: 5.188s
     CGroup: /system.slice/virtqemud.service
             └─225907 /usr/sbin/virtqemud --timeout 120

Jan 22 13:55:20 compute-2 virtqemud[225907]: libvirt version: 11.10.0, package: 2.el9 (builder@centos.org, 2025-12-18-15:09:54, )
Jan 22 13:55:20 compute-2 virtqemud[225907]: hostname: compute-2
Jan 22 13:55:20 compute-2 virtqemud[225907]: End of file while reading data: Input/output error
Jan 22 14:28:48 compute-2 virtqemud[225907]: Domain id=4 name='instance-00000012' uuid=5e2e07b8-ca9c-4abc-81b0-66964eb87fa4 is tainted: custom-monitor
Jan 22 14:29:03 compute-2 virtqemud[225907]: Unable to get XATTR trusted.libvirt.security.ref_selinux on volumes/volume-6e173a8e-fd98-4de4-a470-2c50f67a6d48: No such file or directory
Jan 22 14:29:03 compute-2 virtqemud[225907]: Unable to get XATTR trusted.libvirt.security.ref_dac on volumes/volume-6e173a8e-fd98-4de4-a470-2c50f67a6d48: No such file or directory
Jan 22 14:34:20 compute-2 virtqemud[225907]: argument unsupported: QEMU guest agent is not configured
Jan 22 15:45:37 compute-2 virtqemud[225907]: Failed to connect socket to '/var/run/libvirt/virtnetworkd-sock-ro': No such file or directory
Jan 22 15:45:37 compute-2 virtqemud[225907]: Failed to connect socket to '/var/run/libvirt/virtnwfilterd-sock-ro': No such file or directory
Jan 22 15:45:37 compute-2 virtqemud[225907]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory

● virtsecretd.service - libvirt secret daemon
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.service; enabled; preset: disabled)
     Active: active (running) since Thu 2026-01-22 14:28:39 UTC; 1h 17min ago
TriggeredBy: ● virtsecretd.socket
             ● virtsecretd-ro.socket
             ● virtsecretd-admin.socket
       Docs: man:virtsecretd(8)
             https://libvirt.org/
   Main PID: 248555 (virtsecretd)
         IO: 16.0K read, 0B written
      Tasks: 18 (limit: 48560)
     Memory: 3.7M (peak: 4.9M)
        CPU: 324ms
     CGroup: /system.slice/virtsecretd.service
             └─248555 /usr/sbin/virtsecretd --timeout 120

Jan 22 14:28:39 compute-2 systemd[1]: Starting libvirt secret daemon...
Unit ypbind.service could not be found.
Unit yppasswdd.service could not be found.
Unit ypserv.service could not be found.
Unit ypxfrd.service could not be found.
Jan 22 14:28:39 compute-2 systemd[1]: Started libvirt secret daemon.

○ virtstoraged.service - libvirt storage daemon
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.service; disabled; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ○ virtstoraged.socket
             ○ virtstoraged-admin.socket
             ○ virtstoraged-ro.socket
       Docs: man:virtstoraged(8)
             https://libvirt.org/

● -.slice - Root Slice
     Loaded: loaded
     Active: active since Thu 2026-01-22 12:49:11 UTC; 2h 57min ago
      Until: Thu 2026-01-22 12:49:11 UTC; 2h 57min ago
       Docs: man:systemd.special(7)
      Tasks: 1331
     Memory: 3.7G
        CPU: 44min 570ms
     CGroup: /
             ├─297599 turbostat --debug sleep 10
             ├─297604 sleep 10
             ├─init.scope
             │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
             ├─machine.slice
             │ ├─libpod-572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649.scope
             │ │ └─container
             │ │   ├─226435 dumb-init --single-child -- kolla_start
             │ │   ├─226437 /usr/bin/python3 /usr/bin/nova-compute
             │ │   ├─230058 rbd import --pool vms /var/lib/nova/instances/_base/389efd6047b99779d5161939afa4f2bdb261bfd0 e0e74330-96df-479f-8baf-53fbd2ccba91_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf
             │ │   ├─237484 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmpwbd1s1u6/privsep.sock
             │ │   ├─238396 rbd import --pool vms /var/lib/nova/instances/_base/389efd6047b99779d5161939afa4f2bdb261bfd0 f591d61b-712e-49aa-85bd-8d222b607eb3_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf
             │ │   ├─238793 rbd import --pool vms /var/lib/nova/instances/_base/389efd6047b99779d5161939afa4f2bdb261bfd0 87e798e6-6f00-4fe1-8412-75ddc9e2878e_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf
             │ │   ├─244616 rbd import --pool vms /var/lib/nova/instances/_base/389efd6047b99779d5161939afa4f2bdb261bfd0 8331b067-1b3f-4a1d-a596-e966f6de776a_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf
             │ │   ├─245897 rbd import --pool vms /var/lib/nova/instances/a0b3924b-4422-47c5-ba40-748e41b14d00/disk.config a0b3924b-4422-47c5-ba40-748e41b14d00_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf
             │ │   ├─248518 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmpk2q2e022/privsep.sock
             │ │   └─250095 rbd import --pool vms /var/lib/nova/instances/_base/389efd6047b99779d5161939afa4f2bdb261bfd0 001ba9a6-ba0c-438d-8150-5cfbcec3d34f_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf
             │ ├─libpod-65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d.scope
             │ │ └─container
             │ │   ├─143494 dumb-init --single-child -- kolla_start
             │ │   ├─143497 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │ │   ├─143757 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │ │   ├─143856 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp405dvk24/privsep.sock
             │ │   ├─237689 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmp_pg3kwj0/privsep.sock
             │ │   └─237788 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmp3y50ov6x/privsep.sock
             │ ├─libpod-8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356.scope
             │ │ └─container
             │ │   ├─133158 dumb-init --single-child -- kolla_start
             │ │   └─133161 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
             │ └─machine-qemu\x2d6\x2dinstance\x2d00000016.scope
             │   └─libvirt
             │     └─252492 /usr/libexec/qemu-kvm -name guest=instance-00000016,debug-threads=on -S -object "{\"qom-type\":\"secret\",\"id\":\"masterKey0\",\"format\":\"raw\",\"file\":\"/var/lib/libvirt/qemu/domain-6-instance-00000016/master-key.aes\"}" -machine pc-q35-rhel9.8.0,usb=off,dump-guest-core=off,memory-backend=pc.ram,hpet=off,acpi=on -accel kvm -cpu Nehalem -m size=131072k -object "{\"qom-type\":\"memory-backend-ram\",\"id\":\"pc.ram\",\"size\":134217728}" -overcommit mem-lock=off -smp 1,sockets=1,dies=1,clusters=1,cores=1,threads=1 -uuid 839e8e64-64a9-4e35-85dd-cdbb7f8e71c5 -smbios "type=1,manufacturer=RDO,product=OpenStack Compute,version=27.5.2-0.20250829104910.6f8decf.el9,serial=839e8e64-64a9-4e35-85dd-cdbb7f8e71c5,uuid=839e8e64-64a9-4e35-85dd-cdbb7f8e71c5,family=Virtual Machine" -no-user-config -nodefaults -chardev socket,id=charmonitor,fd=26,server=on,wait=off -mon chardev=charmonitor,id=monitor,mode=control -rtc base=utc,driftfix=slew -global kvm-pit.lost_tick_policy=delay -no-shutdown -boot strict=on -device "{\"driver\":\"pcie-root-port\",\"port\":16,\"chassis\":1,\"id\":\"pci.1\",\"bus\":\"pcie.0\",\"multifunction\":true,\"addr\":\"0x2\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":17,\"chassis\":2,\"id\":\"pci.2\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x1\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":18,\"chassis\":3,\"id\":\"pci.3\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x2\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":19,\"chassis\":4,\"id\":\"pci.4\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x3\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":20,\"chassis\":5,\"id\":\"pci.5\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x4\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":21,\"chassis\":6,\"id\":\"pci.6\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x5\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":22,\"chassis\":7,\"id\":\"pci.7\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x6\"}" -device 
"{\"driver\":\"pcie-root-port\",\"port\":23,\"chassis\":8,\"id\":\"pci.8\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x7\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":24,\"chassis\":9,\"id\":\"pci.9\",\"bus\":\"pcie.0\",\"multifunction\":true,\"addr\":\"0x3\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":25,\"chassis\":10,\"id\":\"pci.10\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x1\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":26,\"chassis\":11,\"id\":\"pci.11\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x2\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":27,\"chassis\":12,\"id\":\"pci.12\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x3\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":28,\"chassis\":13,\"id\":\"pci.13\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x4\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":29,\"chassis\":14,\"id\":\"pci.14\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x5\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":30,\"chassis\":15,\"id\":\"pci.15\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x6\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":31,\"chassis\":16,\"id\":\"pci.16\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x7\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":32,\"chassis\":17,\"id\":\"pci.17\",\"bus\":\"pcie.0\",\"multifunction\":true,\"addr\":\"0x4\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":33,\"chassis\":18,\"id\":\"pci.18\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x1\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":34,\"chassis\":19,\"id\":\"pci.19\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x2\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":35,\"chassis\":20,\"id\":\"pci.20\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x3\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":36,\"chassis\":21,\"id\":\"pci.21\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x4\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":37,\"chassis\":22,\"id\":\"pci.22\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x5\"}" -device 
"{\"driver\":\"pcie-root-port\",\"port\":38,\"chassis\":23,\"id\":\"pci.23\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x6\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":39,\"chassis\":24,\"id\":\"pci.24\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x7\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":40,\"chassis\":25,\"id\":\"pci.25\",\"bus\":\"pcie.0\",\"addr\":\"0x5\"}" -device "{\"driver\":\"pcie-pci-bridge\",\"id\":\"pci.26\",\"bus\":\"pci.1\",\"addr\":\"0x0\"}" -device "{\"driver\":\"piix3-usb-uhci\",\"id\":\"usb\",\"bus\":\"pci.26\",\"addr\":\"0x1\"}" -device "{\"driver\":\"virtio-scsi-pci\",\"id\":\"scsi0\",\"bus\":\"pci.3\",\"addr\":\"0x0\"}" -object "{\"qom-type\":\"secret\",\"id\":\"libvirt-2-storage-auth-secret0\",\"data\":\"ckHvapQG84cP5zoSqf7m1gCY4qTASyRQDTUqj+xCcWI=\",\"keyid\":\"masterKey0\",\"iv\":\"qG5IcOgwSBm0Z7uTjw3pxQ==\",\"format\":\"base64\"}" -blockdev "{\"driver\":\"rbd\",\"pool\":\"vms\",\"image\":\"839e8e64-64a9-4e35-85dd-cdbb7f8e71c5_disk\",\"server\":[{\"host\":\"192.168.122.100\",\"port\":\"6789\"},{\"host\":\"192.168.122.102\",\"port\":\"6789\"},{\"host\":\"192.168.122.101\",\"port\":\"6789\"}],\"user\":\"openstack\",\"auth-client-required\":[\"cephx\",\"none\"],\"key-secret\":\"libvirt-2-storage-auth-secret0\",\"node-name\":\"libvirt-2-storage\",\"read-only\":false,\"cache\":{\"direct\":true,\"no-flush\":false}}" -device "{\"driver\":\"scsi-hd\",\"bus\":\"scsi0.0\",\"channel\":0,\"scsi-id\":0,\"lun\":0,\"device_id\":\"drive-scsi0-0-0-0\",\"drive\":\"libvirt-2-storage\",\"id\":\"scsi0-0-0-0\",\"bootindex\":1,\"write-cache\":\"on\"}" -object "{\"qom-type\":\"secret\",\"id\":\"libvirt-1-storage-auth-secret0\",\"data\":\"us/rqTgpkEOPr1e80IYYxNi8jF+jipoTDjYt15m4hho=\",\"keyid\":\"masterKey0\",\"iv\":\"aOqQCrTLnlRcP7tTW3+8PQ==\",\"format\":\"base64\"}" -blockdev 
"{\"driver\":\"rbd\",\"pool\":\"vms\",\"image\":\"839e8e64-64a9-4e35-85dd-cdbb7f8e71c5_disk.config\",\"server\":[{\"host\":\"192.168.122.100\",\"port\":\"6789\"},{\"host\":\"192.168.122.102\",\"port\":\"6789\"},{\"host\":\"192.168.122.101\",\"port\":\"6789\"}],\"user\":\"openstack\",\"auth-client-required\":[\"cephx\",\"none\"],\"key-secret\":\"libvirt-1-storage-auth-secret0\",\"node-name\":\"libvirt-1-storage\",\"read-only\":true,\"cache\":{\"direct\":true,\"no-flush\":false}}" -device "{\"driver\":\"scsi-cd\",\"bus\":\"scsi0.0\",\"channel\":0,\"scsi-id\":0,\"lun\":1,\"device_id\":\"drive-scsi0-0-0-1\",\"drive\":\"libvirt-1-storage\",\"id\":\"scsi0-0-0-1\",\"write-cache\":\"on\"}" -netdev "{\"type\":\"tap\",\"fd\":\"28\",\"vhost\":true,\"vhostfd\":\"30\",\"id\":\"hostnet0\"}" -device "{\"driver\":\"virtio-net-pci\",\"rx_queue_size\":512,\"host_mtu\":1442,\"netdev\":\"hostnet0\",\"id\":\"net0\",\"mac\":\"fa:16:3e:35:f2:b5\",\"bus\":\"pci.2\",\"addr\":\"0x0\"}" -add-fd set=0,fd=27,opaque=serial0-log -chardev pty,id=charserial0,logfile=/dev/fdset/0,logappend=on -device "{\"driver\":\"isa-serial\",\"chardev\":\"charserial0\",\"id\":\"serial0\",\"index\":0}" -device "{\"driver\":\"usb-tablet\",\"id\":\"input0\",\"bus\":\"usb.0\",\"port\":\"1\"}" -audiodev "{\"id\":\"audio1\",\"driver\":\"none\"}" -object "{\"qom-type\":\"tls-creds-x509\",\"id\":\"vnc-tls-creds0\",\"dir\":\"/etc/pki/qemu\",\"endpoint\":\"server\",\"verify-peer\":true}" -vnc "[::0]:0,tls-creds=vnc-tls-creds0,audiodev=audio1" -device "{\"driver\":\"virtio-vga\",\"id\":\"video0\",\"max_outputs\":1,\"bus\":\"pcie.0\",\"addr\":\"0x1\"}" -global ICH9-LPC.noreboot=off -watchdog-action reset -device "{\"driver\":\"virtio-balloon-pci\",\"id\":\"balloon0\",\"bus\":\"pci.4\",\"addr\":\"0x0\"}" -object "{\"qom-type\":\"rng-random\",\"id\":\"objrng0\",\"filename\":\"/dev/urandom\"}" -device "{\"driver\":\"virtio-rng-pci\",\"rng\":\"objrng0\",\"id\":\"rng0\",\"bus\":\"pci.5\",\"addr\":\"0x0\"}" -device 
"{\"driver\":\"vmcoreinfo\"}" -sandbox on,obsolete=deny,elevateprivileges=deny,spawn=deny,resourcecontrol=deny -msg timestamp=on
             ├─system.slice
             │ ├─NetworkManager.service
             │ │ └─49000 /usr/sbin/NetworkManager --no-daemon
             │ ├─auditd.service
             │ │ ├─699 /sbin/auditd
             │ │ └─701 /usr/sbin/sedispatch
             │ ├─chronyd.service
             │ │ └─58561 /usr/sbin/chronyd -F 2
             │ ├─crond.service
             │ │ └─1006 /usr/sbin/crond -n
             │ ├─dbus-broker.service
             │ │ ├─760 /usr/bin/dbus-broker-launch --scope system --audit
             │ │ └─772 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             │ ├─edpm_nova_compute.service
             │ │ └─226433 /usr/bin/conmon --api-version 1 -c 572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649 -u 572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649/userdata -p /run/containers/storage/overlay-containers/572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649
             │ ├─edpm_ovn_controller.service
             │ │ └─133156 /usr/bin/conmon --api-version 1 -c 8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356 -u 8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356/userdata -p /run/containers/storage/overlay-containers/8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356
             │ ├─edpm_ovn_metadata_agent.service
             │ │ └─143492 /usr/bin/conmon --api-version 1 -c 65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d -u 65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d/userdata -p /run/containers/storage/overlay-containers/65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d
             │ ├─gssproxy.service
             │ │ └─868 /usr/sbin/gssproxy -D
             │ ├─irqbalance.service
             │ │ └─785 /usr/sbin/irqbalance
             │ ├─iscsid.service
             │ │ └─211449 /usr/sbin/iscsid -f
             │ ├─multipathd.service
             │ │ └─211608 /sbin/multipathd -d -s
             │ ├─ovs-vswitchd.service
             │ │ └─47295 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             │ ├─ovsdb-server.service
             │ │ └─47215 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             │ ├─polkit.service
             │ │ └─43481 /usr/lib/polkit-1/polkitd --no-debug
             │ ├─rpcbind.service
             │ │ └─697 /usr/bin/rpcbind -w -f
             │ ├─rsyslog.service
             │ │ └─1002 /usr/sbin/rsyslogd -n
             │ ├─sshd.service
             │ │ └─169467 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             │ ├─system-ceph\x2d088fe176\x2d0106\x2d5401\x2d803c\x2d2da38b73b76a.slice
             │ │ ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@crash.compute-2.service
             │ │ │ ├─libpod-payload-52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93
             │ │ │ │ ├─77794 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-2
             │ │ │ │ └─77796 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-2
             │ │ │ └─runtime
             │ │ │   └─77792 /usr/bin/conmon --api-version 1 -c 52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93 -u 52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93/userdata -p /run/containers/storage/overlay-containers/52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-crash-compute-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@crash.compute-2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93
             │ │ ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@haproxy.rgw.default.compute-2.zogxki.service
             │ │ │ ├─libpod-payload-ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f
             │ │ │ │ ├─82540 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ │ │ │ ├─82542 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ │ │ └─82544 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ │ └─runtime
             │ │ │   └─82538 /usr/bin/conmon --api-version 1 -c ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f -u ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f/userdata -p /run/containers/storage/overlay-containers/ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-haproxy-rgw-default-compute-2-zogxki --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@haproxy.rgw.default.compute-2.zogxki.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f
             │ │ ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@keepalived.rgw.default.compute-2.xbsrtt.service
             │ │ │ ├─libpod-payload-6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4
             │ │ │ │ ├─82994 /run/podman-init -- ./init.sh
             │ │ │ │ ├─82996 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ │ │ └─82998 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ │ └─runtime
             │ │ │   └─82992 /usr/bin/conmon --api-version 1 -c 6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4 -u 6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4/userdata -p /run/containers/storage/overlay-containers/6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-keepalived-rgw-default-compute-2-xbsrtt --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@keepalived.rgw.default.compute-2.xbsrtt.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4
             │ │ ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@mds.cephfs.compute-2.zycvef.service
             │ │ │ ├─libpod-payload-28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63
             │ │ │ │ ├─81152 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-2.zycvef -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─81154 /usr/bin/ceph-mds -n mds.cephfs.compute-2.zycvef -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─81150 /usr/bin/conmon --api-version 1 -c 28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63 -u 28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63/userdata -p /run/containers/storage/overlay-containers/28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-mds-cephfs-compute-2-zycvef --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@mds.cephfs.compute-2.zycvef.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63
             │ │ ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@mgr.compute-2.tjdsdx.service
             │ │ │ ├─libpod-payload-3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f
             │ │ │ │ ├─77436 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-2.tjdsdx -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─77438 /usr/bin/ceph-mgr -n mgr.compute-2.tjdsdx -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─77434 /usr/bin/conmon --api-version 1 -c 3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f -u 3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f/userdata -p /run/containers/storage/overlay-containers/3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-mgr-compute-2-tjdsdx --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@mgr.compute-2.tjdsdx.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f
             │ │ ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@mon.compute-2.service
             │ │ │ ├─libpod-payload-ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6
             │ │ │ │ ├─77079 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ │ └─77081 /usr/bin/ceph-mon -n mon.compute-2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─77077 /usr/bin/conmon --api-version 1 -c ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6 -u ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6/userdata -p /run/containers/storage/overlay-containers/ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-mon-compute-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@mon.compute-2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6
             │ │ ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@osd.2.service
             │ │ │ ├─libpod-payload-1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d
             │ │ │ │ ├─79777 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─79779 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─79775 /usr/bin/conmon --api-version 1 -c 1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d -u 1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d/userdata -p /run/containers/storage/overlay-containers/1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d
             │ │ └─ceph-088fe176-0106-5401-803c-2da38b73b76a@rgw.rgw.compute-2.gfsxzw.service
             │ │   ├─libpod-payload-49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0
             │ │   │ ├─80767 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-2.gfsxzw -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │   │ └─80769 /usr/bin/radosgw -n client.rgw.rgw.compute-2.gfsxzw -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │   └─runtime
             │ │     └─80765 /usr/bin/conmon --api-version 1 -c 49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0 -u 49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0/userdata -p /run/containers/storage/overlay-containers/49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-rgw-rgw-compute-2-gfsxzw --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@rgw.rgw.compute-2.gfsxzw.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0
             │ ├─system-getty.slice
             │ │ └─getty@tty1.service
             │ │   └─1007 /sbin/agetty -o "-p -- \\u" --noclear - linux
             │ ├─system-serial\x2dgetty.slice
             │ │ └─serial-getty@ttyS0.service
             │ │   └─1009 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             │ ├─systemd-hostnamed.service
             │ │ └─294012 /usr/lib/systemd/systemd-hostnamed
             │ ├─systemd-journald.service
             │ │ └─675 /usr/lib/systemd/systemd-journald
             │ ├─systemd-logind.service
             │ │ └─787 /usr/lib/systemd/systemd-logind
             │ ├─systemd-machined.service
             │ │ └─194970 /usr/lib/systemd/systemd-machined
             │ ├─systemd-udevd.service
             │ │ └─udev
             │ │   └─727 /usr/lib/systemd/systemd-udevd
             │ ├─tuned.service
             │ │ └─92450 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             │ ├─virtlogd.service
             │ │ └─194338 /usr/sbin/virtlogd
             │ ├─virtnodedevd.service
             │ │ └─226244 /usr/sbin/virtnodedevd --timeout 120
             │ ├─virtqemud.service
             │ │ └─225907 /usr/sbin/virtqemud --timeout 120
             │ └─virtsecretd.service
             │   └─248555 /usr/sbin/virtsecretd --timeout 120
             └─user.slice
               ├─user-1000.slice
               │ ├─session-1.scope
               │ │ └─4517 /usr/bin/python3
               │ ├─session-51.scope
               │ │ ├─291623 "sshd-session: zuul [priv]"
               │ │ ├─291627 "sshd-session: zuul@notty"
               │ │ ├─291628 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
               │ │ ├─291652 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
               │ │ ├─297596 timeout 15s turbostat --debug sleep 10
               │ │ ├─298091 timeout 300s systemctl status --all
               │ │ ├─298093 systemctl status --all
               │ │ ├─298386 timeout --foreground 300s virsh -r capabilities
               │ │ ├─298387 virsh -r capabilities
               │ │ ├─298388 timeout 300s ceph pg dump --format json-pretty
               │ │ └─298389 /usr/bin/python3 -s /usr/bin/ceph pg dump --format json-pretty
               │ └─user@1000.service
               │   ├─app.slice
               │   │ └─dbus-broker.service
               │   │   ├─11936 /usr/bin/dbus-broker-launch --scope user
               │   │   └─11948 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               │   ├─init.scope
               │   │ ├─4305 /usr/lib/systemd/systemd --user
               │   │ └─4307 "(sd-pam)"
               │   └─user.slice
               │     └─podman-pause-3b1c51bd.scope
               │       └─11838 catatonit -P
               └─user-42477.slice
                 ├─session-20.scope
                 │ ├─72606 "sshd-session: ceph-admin [priv]"
                 │ └─72628 "sshd-session: ceph-admin"
                 ├─session-22.scope
                 │ ├─72623 "sshd-session: ceph-admin [priv]"
                 │ └─72629 "sshd-session: ceph-admin@notty"
                 ├─session-23.scope
                 │ ├─72680 "sshd-session: ceph-admin [priv]"
                 │ └─72683 "sshd-session: ceph-admin@notty"
                 ├─session-24.scope
                 │ ├─72734 "sshd-session: ceph-admin [priv]"
                 │ └─72737 "sshd-session: ceph-admin@notty"
                 ├─session-25.scope
                 │ ├─72788 "sshd-session: ceph-admin [priv]"
                 │ └─72791 "sshd-session: ceph-admin@notty"
                 ├─session-26.scope
                 │ ├─72842 "sshd-session: ceph-admin [priv]"
                 │ └─72845 "sshd-session: ceph-admin@notty"
                 ├─session-27.scope
                 │ ├─72896 "sshd-session: ceph-admin [priv]"
                 │ └─72899 "sshd-session: ceph-admin@notty"
                 ├─session-28.scope
                 │ ├─72950 "sshd-session: ceph-admin [priv]"
                 │ └─72953 "sshd-session: ceph-admin@notty"
                 ├─session-29.scope
                 │ ├─73004 "sshd-session: ceph-admin [priv]"
                 │ └─73007 "sshd-session: ceph-admin@notty"
                 ├─session-30.scope
                 │ ├─73058 "sshd-session: ceph-admin [priv]"
                 │ └─73061 "sshd-session: ceph-admin@notty"
                 ├─session-31.scope
                 │ ├─73085 "sshd-session: ceph-admin [priv]"
                 │ └─73088 "sshd-session: ceph-admin@notty"
                 ├─session-32.scope
                 │ ├─73139 "sshd-session: ceph-admin [priv]"
                 │ └─73142 "sshd-session: ceph-admin@notty"
                 └─user@42477.service
                   └─init.scope
                     ├─72610 /usr/lib/systemd/systemd --user
                     └─72612 "(sd-pam)"

Jan 22 15:39:23 compute-2 systemd[1]: libpod-conmon-7388ce5ee3d99173f70197fceb574b7daa841b8d9bb8a2d748a9c53909dc30fd.scope: Deactivated successfully.
Jan 22 15:39:23 compute-2 systemd[1]: Started libpod-conmon-4fc8cc84139abc258a816c26eaa0a142ddd799f3b381fccee791026af3a708db.scope.
Jan 22 15:39:23 compute-2 systemd[1]: Started libcrun container.
Jan 22 15:39:25 compute-2 systemd[1]: libpod-4fc8cc84139abc258a816c26eaa0a142ddd799f3b381fccee791026af3a708db.scope: Deactivated successfully.
Jan 22 15:39:25 compute-2 systemd[1]: libpod-4fc8cc84139abc258a816c26eaa0a142ddd799f3b381fccee791026af3a708db.scope: Consumed 1.361s CPU time.
Jan 22 15:39:25 compute-2 systemd[1]: var-lib-containers-storage-overlay-c8b1142ccbf335480b995577fe7d87f8df451a3753a1aab61efbc6016c18fc4a-merged.mount: Deactivated successfully.
Jan 22 15:39:25 compute-2 systemd[1]: libpod-conmon-4fc8cc84139abc258a816c26eaa0a142ddd799f3b381fccee791026af3a708db.scope: Deactivated successfully.
Jan 22 15:45:29 compute-2 systemd[1]: Started Session 51 of User zuul.
Jan 22 15:45:49 compute-2 systemd[1]: Starting Hostname Service...
Jan 22 15:45:49 compute-2 systemd[1]: Started Hostname Service.

● machine.slice - Virtual Machine and Container Slice
     Loaded: loaded (/usr/lib/systemd/system/machine.slice; static)
     Active: active since Thu 2026-01-22 13:35:17 UTC; 2h 10min ago
      Until: Thu 2026-01-22 13:35:17 UTC; 2h 10min ago
       Docs: man:systemd.special(7)
         IO: 183.7M read, 94.5M written
      Tasks: 202
     Memory: 1.5G (peak: 1.6G)
        CPU: 7min 39.327s
     CGroup: /machine.slice
             ├─libpod-572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649.scope
             │ └─container
             │   ├─226435 dumb-init --single-child -- kolla_start
             │   ├─226437 /usr/bin/python3 /usr/bin/nova-compute
             │   ├─230058 rbd import --pool vms /var/lib/nova/instances/_base/389efd6047b99779d5161939afa4f2bdb261bfd0 e0e74330-96df-479f-8baf-53fbd2ccba91_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf
             │   ├─237484 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context vif_plug_ovs.privsep.vif_plug --privsep_sock_path /tmp/tmpwbd1s1u6/privsep.sock
             │   ├─238396 rbd import --pool vms /var/lib/nova/instances/_base/389efd6047b99779d5161939afa4f2bdb261bfd0 f591d61b-712e-49aa-85bd-8d222b607eb3_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf
             │   ├─238793 rbd import --pool vms /var/lib/nova/instances/_base/389efd6047b99779d5161939afa4f2bdb261bfd0 87e798e6-6f00-4fe1-8412-75ddc9e2878e_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf
             │   ├─244616 rbd import --pool vms /var/lib/nova/instances/_base/389efd6047b99779d5161939afa4f2bdb261bfd0 8331b067-1b3f-4a1d-a596-e966f6de776a_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf
             │   ├─245897 rbd import --pool vms /var/lib/nova/instances/a0b3924b-4422-47c5-ba40-748e41b14d00/disk.config a0b3924b-4422-47c5-ba40-748e41b14d00_disk.config --image-format=2 --id openstack --conf /etc/ceph/ceph.conf
             │   ├─248518 /usr/bin/python3 /bin/privsep-helper --config-file /etc/nova/nova.conf --config-file /etc/nova/nova-compute.conf --config-dir /etc/nova/nova.conf.d --privsep_context os_brick.privileged.default --privsep_sock_path /tmp/tmpk2q2e022/privsep.sock
             │   └─250095 rbd import --pool vms /var/lib/nova/instances/_base/389efd6047b99779d5161939afa4f2bdb261bfd0 001ba9a6-ba0c-438d-8150-5cfbcec3d34f_disk --image-format=2 --id openstack --conf /etc/ceph/ceph.conf
             ├─libpod-65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d.scope
             │ └─container
             │   ├─143494 dumb-init --single-child -- kolla_start
             │   ├─143497 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │   ├─143757 "neutron-ovn-metadata-agent (/usr/bin/python3 /usr/bin/neutron-ovn-metadata-agent)"
             │   ├─143856 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.namespace_cmd --privsep_sock_path /tmp/tmp405dvk24/privsep.sock
             │   ├─237689 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.default --privsep_sock_path /tmp/tmp_pg3kwj0/privsep.sock
             │   └─237788 /usr/bin/python3 /bin/privsep-helper --config-file /etc/neutron/neutron.conf --config-dir /etc/neutron.conf.d --privsep_context neutron.privileged.link_cmd --privsep_sock_path /tmp/tmp3y50ov6x/privsep.sock
             ├─libpod-8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356.scope
             │ └─container
             │   ├─133158 dumb-init --single-child -- kolla_start
             │   └─133161 /usr/bin/ovn-controller --pidfile unix:/run/openvswitch/db.sock -p /etc/pki/tls/private/ovndb.key -c /etc/pki/tls/certs/ovndb.crt -C /etc/pki/tls/certs/ovndbca.crt
             └─machine-qemu\x2d6\x2dinstance\x2d00000016.scope
               └─libvirt
                 └─252492 /usr/libexec/qemu-kvm -name guest=instance-00000016,debug-threads=on -S -object "{\"qom-type\":\"secret\",\"id\":\"masterKey0\",\"format\":\"raw\",\"file\":\"/var/lib/libvirt/qemu/domain-6-instance-00000016/master-key.aes\"}" -machine pc-q35-rhel9.8.0,usb=off,dump-guest-core=off,memory-backend=pc.ram,hpet=off,acpi=on -accel kvm -cpu Nehalem -m size=131072k -object "{\"qom-type\":\"memory-backend-ram\",\"id\":\"pc.ram\",\"size\":134217728}" -overcommit mem-lock=off -smp 1,sockets=1,dies=1,clusters=1,cores=1,threads=1 -uuid 839e8e64-64a9-4e35-85dd-cdbb7f8e71c5 -smbios "type=1,manufacturer=RDO,product=OpenStack Compute,version=27.5.2-0.20250829104910.6f8decf.el9,serial=839e8e64-64a9-4e35-85dd-cdbb7f8e71c5,uuid=839e8e64-64a9-4e35-85dd-cdbb7f8e71c5,family=Virtual Machine" -no-user-config -nodefaults -chardev socket,id=charmonitor,fd=26,server=on,wait=off -mon chardev=charmonitor,id=monitor,mode=control -rtc base=utc,driftfix=slew -global kvm-pit.lost_tick_policy=delay -no-shutdown -boot strict=on -device "{\"driver\":\"pcie-root-port\",\"port\":16,\"chassis\":1,\"id\":\"pci.1\",\"bus\":\"pcie.0\",\"multifunction\":true,\"addr\":\"0x2\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":17,\"chassis\":2,\"id\":\"pci.2\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x1\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":18,\"chassis\":3,\"id\":\"pci.3\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x2\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":19,\"chassis\":4,\"id\":\"pci.4\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x3\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":20,\"chassis\":5,\"id\":\"pci.5\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x4\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":21,\"chassis\":6,\"id\":\"pci.6\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x5\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":22,\"chassis\":7,\"id\":\"pci.7\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x6\"}" -device 
"{\"driver\":\"pcie-root-port\",\"port\":23,\"chassis\":8,\"id\":\"pci.8\",\"bus\":\"pcie.0\",\"addr\":\"0x2.0x7\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":24,\"chassis\":9,\"id\":\"pci.9\",\"bus\":\"pcie.0\",\"multifunction\":true,\"addr\":\"0x3\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":25,\"chassis\":10,\"id\":\"pci.10\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x1\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":26,\"chassis\":11,\"id\":\"pci.11\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x2\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":27,\"chassis\":12,\"id\":\"pci.12\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x3\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":28,\"chassis\":13,\"id\":\"pci.13\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x4\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":29,\"chassis\":14,\"id\":\"pci.14\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x5\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":30,\"chassis\":15,\"id\":\"pci.15\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x6\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":31,\"chassis\":16,\"id\":\"pci.16\",\"bus\":\"pcie.0\",\"addr\":\"0x3.0x7\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":32,\"chassis\":17,\"id\":\"pci.17\",\"bus\":\"pcie.0\",\"multifunction\":true,\"addr\":\"0x4\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":33,\"chassis\":18,\"id\":\"pci.18\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x1\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":34,\"chassis\":19,\"id\":\"pci.19\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x2\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":35,\"chassis\":20,\"id\":\"pci.20\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x3\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":36,\"chassis\":21,\"id\":\"pci.21\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x4\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":37,\"chassis\":22,\"id\":\"pci.22\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x5\"}" -device 
"{\"driver\":\"pcie-root-port\",\"port\":38,\"chassis\":23,\"id\":\"pci.23\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x6\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":39,\"chassis\":24,\"id\":\"pci.24\",\"bus\":\"pcie.0\",\"addr\":\"0x4.0x7\"}" -device "{\"driver\":\"pcie-root-port\",\"port\":40,\"chassis\":25,\"id\":\"pci.25\",\"bus\":\"pcie.0\",\"addr\":\"0x5\"}" -device "{\"driver\":\"pcie-pci-bridge\",\"id\":\"pci.26\",\"bus\":\"pci.1\",\"addr\":\"0x0\"}" -device "{\"driver\":\"piix3-usb-uhci\",\"id\":\"usb\",\"bus\":\"pci.26\",\"addr\":\"0x1\"}" -device "{\"driver\":\"virtio-scsi-pci\",\"id\":\"scsi0\",\"bus\":\"pci.3\",\"addr\":\"0x0\"}" -object "{\"qom-type\":\"secret\",\"id\":\"libvirt-2-storage-auth-secret0\",\"data\":\"ckHvapQG84cP5zoSqf7m1gCY4qTASyRQDTUqj+xCcWI=\",\"keyid\":\"masterKey0\",\"iv\":\"qG5IcOgwSBm0Z7uTjw3pxQ==\",\"format\":\"base64\"}" -blockdev "{\"driver\":\"rbd\",\"pool\":\"vms\",\"image\":\"839e8e64-64a9-4e35-85dd-cdbb7f8e71c5_disk\",\"server\":[{\"host\":\"192.168.122.100\",\"port\":\"6789\"},{\"host\":\"192.168.122.102\",\"port\":\"6789\"},{\"host\":\"192.168.122.101\",\"port\":\"6789\"}],\"user\":\"openstack\",\"auth-client-required\":[\"cephx\",\"none\"],\"key-secret\":\"libvirt-2-storage-auth-secret0\",\"node-name\":\"libvirt-2-storage\",\"read-only\":false,\"cache\":{\"direct\":true,\"no-flush\":false}}" -device "{\"driver\":\"scsi-hd\",\"bus\":\"scsi0.0\",\"channel\":0,\"scsi-id\":0,\"lun\":0,\"device_id\":\"drive-scsi0-0-0-0\",\"drive\":\"libvirt-2-storage\",\"id\":\"scsi0-0-0-0\",\"bootindex\":1,\"write-cache\":\"on\"}" -object "{\"qom-type\":\"secret\",\"id\":\"libvirt-1-storage-auth-secret0\",\"data\":\"us/rqTgpkEOPr1e80IYYxNi8jF+jipoTDjYt15m4hho=\",\"keyid\":\"masterKey0\",\"iv\":\"aOqQCrTLnlRcP7tTW3+8PQ==\",\"format\":\"base64\"}" -blockdev 
"{\"driver\":\"rbd\",\"pool\":\"vms\",\"image\":\"839e8e64-64a9-4e35-85dd-cdbb7f8e71c5_disk.config\",\"server\":[{\"host\":\"192.168.122.100\",\"port\":\"6789\"},{\"host\":\"192.168.122.102\",\"port\":\"6789\"},{\"host\":\"192.168.122.101\",\"port\":\"6789\"}],\"user\":\"openstack\",\"auth-client-required\":[\"cephx\",\"none\"],\"key-secret\":\"libvirt-1-storage-auth-secret0\",\"node-name\":\"libvirt-1-storage\",\"read-only\":true,\"cache\":{\"direct\":true,\"no-flush\":false}}" -device "{\"driver\":\"scsi-cd\",\"bus\":\"scsi0.0\",\"channel\":0,\"scsi-id\":0,\"lun\":1,\"device_id\":\"drive-scsi0-0-0-1\",\"drive\":\"libvirt-1-storage\",\"id\":\"scsi0-0-0-1\",\"write-cache\":\"on\"}" -netdev "{\"type\":\"tap\",\"fd\":\"28\",\"vhost\":true,\"vhostfd\":\"30\",\"id\":\"hostnet0\"}" -device "{\"driver\":\"virtio-net-pci\",\"rx_queue_size\":512,\"host_mtu\":1442,\"netdev\":\"hostnet0\",\"id\":\"net0\",\"mac\":\"fa:16:3e:35:f2:b5\",\"bus\":\"pci.2\",\"addr\":\"0x0\"}" -add-fd set=0,fd=27,opaque=serial0-log -chardev pty,id=charserial0,logfile=/dev/fdset/0,logappend=on -device "{\"driver\":\"isa-serial\",\"chardev\":\"charserial0\",\"id\":\"serial0\",\"index\":0}" -device "{\"driver\":\"usb-tablet\",\"id\":\"input0\",\"bus\":\"usb.0\",\"port\":\"1\"}" -audiodev "{\"id\":\"audio1\",\"driver\":\"none\"}" -object "{\"qom-type\":\"tls-creds-x509\",\"id\":\"vnc-tls-creds0\",\"dir\":\"/etc/pki/qemu\",\"endpoint\":\"server\",\"verify-peer\":true}" -vnc "[::0]:0,tls-creds=vnc-tls-creds0,audiodev=audio1" -device "{\"driver\":\"virtio-vga\",\"id\":\"video0\",\"max_outputs\":1,\"bus\":\"pcie.0\",\"addr\":\"0x1\"}" -global ICH9-LPC.noreboot=off -watchdog-action reset -device "{\"driver\":\"virtio-balloon-pci\",\"id\":\"balloon0\",\"bus\":\"pci.4\",\"addr\":\"0x0\"}" -object "{\"qom-type\":\"rng-random\",\"id\":\"objrng0\",\"filename\":\"/dev/urandom\"}" -device "{\"driver\":\"virtio-rng-pci\",\"rng\":\"objrng0\",\"id\":\"rng0\",\"bus\":\"pci.5\",\"addr\":\"0x0\"}" -device 
"{\"driver\":\"vmcoreinfo\"}" -sandbox on,obsolete=deny,elevateprivileges=deny,spawn=deny,resourcecontrol=deny -msg timestamp=on

Jan 22 15:39:24 compute-2 nostalgic_newton[287195]:             "scheduler_mode": "mq-deadline",
Jan 22 15:39:24 compute-2 nostalgic_newton[287195]:             "sectors": 0,
Jan 22 15:39:24 compute-2 nostalgic_newton[287195]:             "sectorsize": "2048",
Jan 22 15:39:24 compute-2 nostalgic_newton[287195]:             "size": 493568.0,
Jan 22 15:39:24 compute-2 nostalgic_newton[287195]:             "support_discard": "2048",
Jan 22 15:39:24 compute-2 nostalgic_newton[287195]:             "type": "disk",
Jan 22 15:39:24 compute-2 nostalgic_newton[287195]:             "vendor": "QEMU"
Jan 22 15:39:24 compute-2 nostalgic_newton[287195]:         }
Jan 22 15:39:24 compute-2 nostalgic_newton[287195]:     }
Jan 22 15:39:24 compute-2 nostalgic_newton[287195]: ]

● system-ceph\x2d088fe176\x2d0106\x2d5401\x2d803c\x2d2da38b73b76a.slice - Slice /system/ceph-088fe176-0106-5401-803c-2da38b73b76a
     Loaded: loaded
     Active: active since Thu 2026-01-22 13:35:28 UTC; 2h 10min ago
      Until: Thu 2026-01-22 13:35:28 UTC; 2h 10min ago
         IO: 112.4M read, 3.6G written
      Tasks: 761
     Memory: 1.4G (peak: 1.5G)
        CPU: 5min 32.677s
     CGroup: /system.slice/system-ceph\x2d088fe176\x2d0106\x2d5401\x2d803c\x2d2da38b73b76a.slice
             ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@crash.compute-2.service
             │ ├─libpod-payload-52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93
             │ │ ├─77794 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-2
             │ │ └─77796 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-2
             │ └─runtime
             │   └─77792 /usr/bin/conmon --api-version 1 -c 52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93 -u 52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93/userdata -p /run/containers/storage/overlay-containers/52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-crash-compute-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@crash.compute-2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93
             ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@haproxy.rgw.default.compute-2.zogxki.service
             │ ├─libpod-payload-ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f
             │ │ ├─82540 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ │ ├─82542 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ └─82544 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ └─runtime
             │   └─82538 /usr/bin/conmon --api-version 1 -c ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f -u ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f/userdata -p /run/containers/storage/overlay-containers/ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-haproxy-rgw-default-compute-2-zogxki --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@haproxy.rgw.default.compute-2.zogxki.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f
             ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@keepalived.rgw.default.compute-2.xbsrtt.service
             │ ├─libpod-payload-6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4
             │ │ ├─82994 /run/podman-init -- ./init.sh
             │ │ ├─82996 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ └─82998 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ └─runtime
             │   └─82992 /usr/bin/conmon --api-version 1 -c 6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4 -u 6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4/userdata -p /run/containers/storage/overlay-containers/6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-keepalived-rgw-default-compute-2-xbsrtt --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@keepalived.rgw.default.compute-2.xbsrtt.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4
             ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@mds.cephfs.compute-2.zycvef.service
             │ ├─libpod-payload-28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63
             │ │ ├─81152 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-2.zycvef -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─81154 /usr/bin/ceph-mds -n mds.cephfs.compute-2.zycvef -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─81150 /usr/bin/conmon --api-version 1 -c 28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63 -u 28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63/userdata -p /run/containers/storage/overlay-containers/28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-mds-cephfs-compute-2-zycvef --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@mds.cephfs.compute-2.zycvef.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63
             ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@mgr.compute-2.tjdsdx.service
             │ ├─libpod-payload-3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f
             │ │ ├─77436 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-2.tjdsdx -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─77438 /usr/bin/ceph-mgr -n mgr.compute-2.tjdsdx -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─77434 /usr/bin/conmon --api-version 1 -c 3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f -u 3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f/userdata -p /run/containers/storage/overlay-containers/3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-mgr-compute-2-tjdsdx --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@mgr.compute-2.tjdsdx.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f
             ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@mon.compute-2.service
             │ ├─libpod-payload-ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6
             │ │ ├─77079 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─77081 /usr/bin/ceph-mon -n mon.compute-2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─runtime
             │   └─77077 /usr/bin/conmon --api-version 1 -c ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6 -u ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6/userdata -p /run/containers/storage/overlay-containers/ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-mon-compute-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@mon.compute-2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6
             ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@osd.2.service
             │ ├─libpod-payload-1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d
             │ │ ├─79777 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─79779 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─79775 /usr/bin/conmon --api-version 1 -c 1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d -u 1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d/userdata -p /run/containers/storage/overlay-containers/1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d
             └─ceph-088fe176-0106-5401-803c-2da38b73b76a@rgw.rgw.compute-2.gfsxzw.service
               ├─libpod-payload-49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0
               │ ├─80767 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-2.gfsxzw -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
               │ └─80769 /usr/bin/radosgw -n client.rgw.rgw.compute-2.gfsxzw -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
               └─runtime
                 └─80765 /usr/bin/conmon --api-version 1 -c 49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0 -u 49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0/userdata -p /run/containers/storage/overlay-containers/49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-rgw-rgw-compute-2-gfsxzw --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@rgw.rgw.compute-2.gfsxzw.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0

Jan 22 15:46:12 compute-2 ceph-mon[77081]: 62 slow requests (by type [ 'delayed' : 62 ] most affected pool [ 'vms' : 37 ])
Jan 22 15:46:12 compute-2 ceph-mon[77081]: Health check update: 62 slow ops, oldest one blocked for 7763 sec, osd.2 has slow ops (SLOW_OPS)
Jan 22 15:46:12 compute-2 ceph-mon[77081]: from='client.? 192.168.122.102:0/1564577597' entity='client.admin' cmd=[{"prefix": "osd numa-status", "format": "json-pretty"}]: dispatch
Jan 22 15:46:12 compute-2 ceph-088fe176-0106-5401-803c-2da38b73b76a-osd-2[79775]: 2026-01-22T15:46:12.922+0000 7f47f8ed4640 -1 osd.2 183 get_health_metrics reporting 62 slow ops, oldest is osd_op(client.14140.0:10 2.12 2:4e99cc3e:::rbd_mirror_snapshot_schedule:head [omap-get-vals in=16b] snapc 0=[] ondisk+read+known_if_redirected+supports_pool_eio e50)
Jan 22 15:46:12 compute-2 ceph-osd[79779]: osd.2 183 get_health_metrics reporting 62 slow ops, oldest is osd_op(client.14140.0:10 2.12 2:4e99cc3e:::rbd_mirror_snapshot_schedule:head [omap-get-vals in=16b] snapc 0=[] ondisk+read+known_if_redirected+supports_pool_eio e50)
Jan 22 15:46:12 compute-2 ceph-osd[79779]: log_channel(cluster) log [WRN] : 62 slow requests (by type [ 'delayed' : 62 ] most affected pool [ 'vms' : 37 ])
Jan 22 15:46:12 compute-2 ceph-mon[77081]: mon.compute-2@1(peon) e3 handle_command mon_command({"prefix": "osd pool ls", "detail": "detail", "format": "json-pretty"} v 0) v1
Jan 22 15:46:12 compute-2 ceph-mon[77081]: log_channel(audit) log [DBG] : from='client.? 192.168.122.102:0/3653425845' entity='client.admin' cmd=[{"prefix": "osd pool ls", "detail": "detail", "format": "json-pretty"}]: dispatch
Jan 22 15:46:13 compute-2 ceph-mon[77081]: mon.compute-2@1(peon) e3 handle_command mon_command({"prefix": "osd stat", "format": "json-pretty"} v 0) v1
Jan 22 15:46:13 compute-2 ceph-mon[77081]: log_channel(audit) log [DBG] : from='client.? 192.168.122.102:0/285384556' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json-pretty"}]: dispatch

● system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice - Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged
     Loaded: loaded
     Active: active since Thu 2026-01-22 13:51:23 UTC; 1h 54min ago
      Until: Thu 2026-01-22 13:51:23 UTC; 1h 54min ago
         IO: 4.0K read, 0B written
      Tasks: 0
     Memory: 4.0K (peak: 58.6M)
        CPU: 925ms
     CGroup: /system.slice/system-dbus\x2d:1.1\x2dorg.fedoraproject.SetroubleshootPrivileged.slice

Jan 22 13:51:23 compute-2 systemd[1]: Created slice Slice /system/dbus-:1.1-org.fedoraproject.SetroubleshootPrivileged.

● system-getty.slice - Slice /system/getty
     Loaded: loaded
     Active: active since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 304.0K (peak: 780.0K)
        CPU: 8ms
     CGroup: /system.slice/system-getty.slice
             └─getty@tty1.service
               └─1007 /sbin/agetty -o "-p -- \\u" --noclear - linux

● system-modprobe.slice - Slice /system/modprobe
     Loaded: loaded
     Active: active since Thu 2026-01-22 12:49:12 UTC; 2h 57min ago
      Until: Thu 2026-01-22 12:49:12 UTC; 2h 57min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 52.0K (peak: 11.5M)
        CPU: 143ms
     CGroup: /system.slice/system-modprobe.slice

Jan 22 12:49:12 localhost systemd[1]: Created slice Slice /system/modprobe.

● system-serial\x2dgetty.slice - Slice /system/serial-getty
     Loaded: loaded
     Active: active since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 252.0K (peak: 508.0K)
        CPU: 12ms
     CGroup: /system.slice/system-serial\x2dgetty.slice
             └─serial-getty@ttyS0.service
               └─1009 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

● system-sshd\x2dkeygen.slice - Slice /system/sshd-keygen
     Loaded: loaded
     Active: active since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 0B (peak: 0B)
        CPU: 0
     CGroup: /system.slice/system-sshd\x2dkeygen.slice

● system.slice - System Slice
     Loaded: loaded
     Active: active since Thu 2026-01-22 12:49:11 UTC; 2h 57min ago
      Until: Thu 2026-01-22 12:49:11 UTC; 2h 57min ago
       Docs: man:systemd.special(7)
         IO: 216.0M read, 3.7G written
      Tasks: 892
     Memory: 2.1G (peak: 2.2G)
        CPU: 12min 16.884s
     CGroup: /system.slice
             ├─NetworkManager.service
             │ └─49000 /usr/sbin/NetworkManager --no-daemon
             ├─auditd.service
             │ ├─699 /sbin/auditd
             │ └─701 /usr/sbin/sedispatch
             ├─chronyd.service
             │ └─58561 /usr/sbin/chronyd -F 2
             ├─crond.service
             │ └─1006 /usr/sbin/crond -n
             ├─dbus-broker.service
             │ ├─760 /usr/bin/dbus-broker-launch --scope system --audit
             │ └─772 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             ├─edpm_nova_compute.service
             │ └─226433 /usr/bin/conmon --api-version 1 -c 572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649 -u 572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649/userdata -p /run/containers/storage/overlay-containers/572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649/userdata/pidfile -n nova_compute --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649/userdata/oci-log --conmon-pidfile /run/nova_compute.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 572ffe12c89ef3d651b3d5a5d0d084d01048037ddf29c596a9682c34d685f649
             ├─edpm_ovn_controller.service
             │ └─133156 /usr/bin/conmon --api-version 1 -c 8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356 -u 8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356/userdata -p /run/containers/storage/overlay-containers/8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356/userdata/pidfile -n ovn_controller --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356 --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356/userdata/oci-log --conmon-pidfile /run/ovn_controller.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg 
--stopped-only --exit-command-arg 8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356
             ├─edpm_ovn_metadata_agent.service
             │ └─143492 /usr/bin/conmon --api-version 1 -c 65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d -u 65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d/userdata -p /run/containers/storage/overlay-containers/65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d/userdata/pidfile -n ovn_metadata_agent --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d --full-attach -s -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d/userdata/oci-log --conmon-pidfile /run/ovn_metadata_agent.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup 
--exit-command-arg --stopped-only --exit-command-arg 65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d
             ├─gssproxy.service
             │ └─868 /usr/sbin/gssproxy -D
             ├─irqbalance.service
             │ └─785 /usr/sbin/irqbalance
             ├─iscsid.service
             │ └─211449 /usr/sbin/iscsid -f
             ├─multipathd.service
             │ └─211608 /sbin/multipathd -d -s
             ├─ovs-vswitchd.service
             │ └─47295 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             ├─ovsdb-server.service
             │ └─47215 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             ├─polkit.service
             │ └─43481 /usr/lib/polkit-1/polkitd --no-debug
             ├─rpcbind.service
             │ └─697 /usr/bin/rpcbind -w -f
             ├─rsyslog.service
             │ └─1002 /usr/sbin/rsyslogd -n
             ├─sshd.service
             │ └─169467 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             ├─system-ceph\x2d088fe176\x2d0106\x2d5401\x2d803c\x2d2da38b73b76a.slice
             │ ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@crash.compute-2.service
             │ │ ├─libpod-payload-52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93
             │ │ │ ├─77794 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-2
             │ │ │ └─77796 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-2
             │ │ └─runtime
             │ │   └─77792 /usr/bin/conmon --api-version 1 -c 52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93 -u 52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93/userdata -p /run/containers/storage/overlay-containers/52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-crash-compute-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@crash.compute-2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 52f09a99f1b294dc32194bfc1ab7f2d1320bd9205c0632fb77a4b4dfb25dbf93
             │ ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@haproxy.rgw.default.compute-2.zogxki.service
             │ │ ├─libpod-payload-ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f
             │ │ │ ├─82540 /run/podman-init -- docker-entrypoint.sh haproxy -f /var/lib/haproxy/haproxy.cfg
             │ │ │ ├─82542 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ │ └─82544 haproxy -W -db -f /var/lib/haproxy/haproxy.cfg
             │ │ └─runtime
             │ │   └─82538 /usr/bin/conmon --api-version 1 -c ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f -u ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f/userdata -p /run/containers/storage/overlay-containers/ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-haproxy-rgw-default-compute-2-zogxki --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@haproxy.rgw.default.compute-2.zogxki.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ff608106d7c871852a462621c4b38466f0a089e42add90baa06df30604a36e5f
             │ ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@keepalived.rgw.default.compute-2.xbsrtt.service
             │ │ ├─libpod-payload-6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4
             │ │ │ ├─82994 /run/podman-init -- ./init.sh
             │ │ │ ├─82996 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ │ └─82998 /usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf
             │ │ └─runtime
             │ │   └─82992 /usr/bin/conmon --api-version 1 -c 6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4 -u 6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4/userdata -p /run/containers/storage/overlay-containers/6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-keepalived-rgw-default-compute-2-xbsrtt --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@keepalived.rgw.default.compute-2.xbsrtt.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on 
--exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 6667054e4ceb45c7be5e11486852d0790d9219015e6bca7cdf08e071806b9af4
             │ ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@mds.cephfs.compute-2.zycvef.service
             │ │ ├─libpod-payload-28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63
             │ │ │ ├─81152 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-2.zycvef -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─81154 /usr/bin/ceph-mds -n mds.cephfs.compute-2.zycvef -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─81150 /usr/bin/conmon --api-version 1 -c 28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63 -u 28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63/userdata -p /run/containers/storage/overlay-containers/28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-mds-cephfs-compute-2-zycvef --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@mds.cephfs.compute-2.zycvef.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 28402c8a6e0adf22561a923d42802647af00df10eacceb300a94fe8b5f18bf63
             │ ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@mgr.compute-2.tjdsdx.service
             │ │ ├─libpod-payload-3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f
             │ │ │ ├─77436 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-2.tjdsdx -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─77438 /usr/bin/ceph-mgr -n mgr.compute-2.tjdsdx -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─77434 /usr/bin/conmon --api-version 1 -c 3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f -u 3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f/userdata -p /run/containers/storage/overlay-containers/3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-mgr-compute-2-tjdsdx --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@mgr.compute-2.tjdsdx.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 3f48eeed4688717dc1b70b826cbb76219abc8f1d02edfa4f514b989747c1506f
             │ ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@mon.compute-2.service
             │ │ ├─libpod-payload-ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6
             │ │ │ ├─77079 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─77081 /usr/bin/ceph-mon -n mon.compute-2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─runtime
             │ │   └─77077 /usr/bin/conmon --api-version 1 -c ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6 -u ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6/userdata -p /run/containers/storage/overlay-containers/ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-mon-compute-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@mon.compute-2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ad3fee4799b44f9e04b5aa9968630e9af6ffd410d7fc49c4207495984cf6bca6
             │ ├─ceph-088fe176-0106-5401-803c-2da38b73b76a@osd.2.service
             │ │ ├─libpod-payload-1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d
             │ │ │ ├─79777 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─79779 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─79775 /usr/bin/conmon --api-version 1 -c 1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d -u 1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d/userdata -p /run/containers/storage/overlay-containers/1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 1f90ecb4fcc015bd1f2f979a5a563080acb2d28030758941d6958f2336c7101d
             │ └─ceph-088fe176-0106-5401-803c-2da38b73b76a@rgw.rgw.compute-2.gfsxzw.service
             │   ├─libpod-payload-49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0
             │   │ ├─80767 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-2.gfsxzw -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │   │ └─80769 /usr/bin/radosgw -n client.rgw.rgw.compute-2.gfsxzw -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │   └─runtime
             │     └─80765 /usr/bin/conmon --api-version 1 -c 49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0 -u 49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0/userdata -p /run/containers/storage/overlay-containers/49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0/userdata/pidfile -n ceph-088fe176-0106-5401-803c-2da38b73b76a-rgw-rgw-compute-2-gfsxzw --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0/userdata/oci-log --conmon-pidfile /run/ceph-088fe176-0106-5401-803c-2da38b73b76a@rgw.rgw.compute-2.gfsxzw.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 49e687254f675aca5071ee91f471edf46c03564ea189efa6346b4d0c66cd7dc0
             ├─system-getty.slice
             │ └─getty@tty1.service
             │   └─1007 /sbin/agetty -o "-p -- \\u" --noclear - linux
             ├─system-serial\x2dgetty.slice
             │ └─serial-getty@ttyS0.service
             │   └─1009 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             ├─systemd-hostnamed.service
             │ └─294012 /usr/lib/systemd/systemd-hostnamed
             ├─systemd-journald.service
             │ └─675 /usr/lib/systemd/systemd-journald
             ├─systemd-logind.service
             │ └─787 /usr/lib/systemd/systemd-logind
             ├─systemd-machined.service
             │ └─194970 /usr/lib/systemd/systemd-machined
             ├─systemd-udevd.service
             │ └─udev
             │   └─727 /usr/lib/systemd/systemd-udevd
             ├─tuned.service
             │ └─92450 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             ├─virtlogd.service
             │ └─194338 /usr/sbin/virtlogd
             ├─virtnodedevd.service
             │ └─226244 /usr/sbin/virtnodedevd --timeout 120
             ├─virtqemud.service
             │ └─225907 /usr/sbin/virtqemud --timeout 120
             └─virtsecretd.service
               └─248555 /usr/sbin/virtsecretd --timeout 120

Jan 22 15:45:37 compute-2 virtqemud[225907]: Failed to connect socket to '/var/run/libvirt/virtnetworkd-sock-ro': No such file or directory
Jan 22 15:45:37 compute-2 virtqemud[225907]: Failed to connect socket to '/var/run/libvirt/virtnwfilterd-sock-ro': No such file or directory
Jan 22 15:45:37 compute-2 virtqemud[225907]: Failed to connect socket to '/var/run/libvirt/virtstoraged-sock-ro': No such file or directory
Jan 22 15:45:38 compute-2 lvm[292291]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Jan 22 15:45:38 compute-2 lvm[292291]: VG ceph_vg0 finished
Jan 22 15:45:47 compute-2 ovn_metadata_agent[143492]: 2026-01-22 15:45:47.287 143497 DEBUG oslo_concurrency.lockutils [-] Acquiring lock "_check_child_processes" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:404[00m
Jan 22 15:45:47 compute-2 ovn_metadata_agent[143492]: 2026-01-22 15:45:47.287 143497 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" acquired by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: waited 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:409[00m
Jan 22 15:45:47 compute-2 ovn_metadata_agent[143492]: 2026-01-22 15:45:47.287 143497 DEBUG oslo_concurrency.lockutils [-] Lock "_check_child_processes" "released" by "neutron.agent.linux.external_process.ProcessMonitor._check_child_processes" :: held 0.000s inner /usr/lib/python3.9/site-packages/oslo_concurrency/lockutils.py:423[00m
Jan 22 15:45:57 compute-2 podman[295071]: 2026-01-22 15:45:57.148557003 +0000 UTC m=+0.188369860 container health_status 8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356 (image=quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified, name=ovn_controller, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20251202, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, config_data={'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '109b2e65a809d9df2b2d81c602046702b988fc7a594c944e65d89c0e3a64ae71-32177f2c3fa09030b0d1ae5cc46811ab0cd45ff7cf090b1a287b538f8d13e58d-32177f2c3fa09030b0d1ae5cc46811ab0cd45ff7cf090b1a287b538f8d13e58d-32177f2c3fa09030b0d1ae5cc46811ab0cd45ff7cf090b1a287b538f8d13e58d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_controller', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified', 'net': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/openvswitch/ovn:/run/ovn:shared,z', '/var/lib/kolla/config_files/ovn_controller.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/openstack/cacerts/ovn/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/ovn/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', '/var/lib/openstack/certs/ovn/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_controller:/openstack:ro,z']}, managed_by=edpm_ansible, org.label-schema.vendor=CentOS, tcib_managed=true, config_id=ovn_controller, container_name=ovn_controller, 
io.buildah.version=1.41.3, maintainer=OpenStack Kubernetes Operator team)
Jan 22 15:46:08 compute-2 podman[297782]: 2026-01-22 15:46:08.037972221 +0000 UTC m=+0.086500367 container health_status 65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d (image=quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified, name=ovn_metadata_agent, health_status=healthy, health_failing_streak=0, health_log=, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, config_data={'cgroupns': 'host', 'depends_on': ['openvswitch.service'], 'environment': {'KOLLA_CONFIG_STRATEGY': 'COPY_ALWAYS', 'EDPM_CONFIG_HASH': '109b2e65a809d9df2b2d81c602046702b988fc7a594c944e65d89c0e3a64ae71-32177f2c3fa09030b0d1ae5cc46811ab0cd45ff7cf090b1a287b538f8d13e58d-32177f2c3fa09030b0d1ae5cc46811ab0cd45ff7cf090b1a287b538f8d13e58d-32177f2c3fa09030b0d1ae5cc46811ab0cd45ff7cf090b1a287b538f8d13e58d-0823bd3e096c75f72e4a95820d41b0d4b6a1172bd2892ddb9f29b788a11bc87d'}, 'healthcheck': {'mount': '/var/lib/openstack/healthchecks/ovn_metadata_agent', 'test': '/openstack/healthcheck'}, 'image': 'quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified', 'net': 'host', 'pid': 'host', 'privileged': True, 'restart': 'always', 'user': 'root', 'volumes': ['/run/openvswitch:/run/openvswitch:z', '/var/lib/openstack/neutron-ovn-metadata-agent:/etc/neutron.conf.d:z', '/run/netns:/run/netns:shared', '/var/lib/kolla/config_files/ovn_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', '/var/lib/neutron:/var/lib/neutron:shared,z', '/var/lib/neutron/ovn_metadata_haproxy_wrapper:/usr/local/bin/haproxy:ro', '/var/lib/neutron/kill_scripts:/etc/neutron/kill_scripts:ro', '/var/lib/openstack/cacerts/neutron-metadata/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/ca.crt:/etc/pki/tls/certs/ovndbca.crt:ro,z', '/var/lib/openstack/certs/neutron-metadata/default/tls.crt:/etc/pki/tls/certs/ovndb.crt:ro,z', 
'/var/lib/openstack/certs/neutron-metadata/default/tls.key:/etc/pki/tls/private/ovndb.key:ro,Z', '/var/lib/openstack/healthchecks/ovn_metadata_agent:/openstack:ro,z']}, io.buildah.version=1.41.3, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, org.label-schema.build-date=20251202, org.label-schema.vendor=CentOS, config_id=ovn_metadata_agent, managed_by=edpm_ansible, tcib_managed=true, container_name=ovn_metadata_agent, maintainer=OpenStack Kubernetes Operator team, org.label-schema.license=GPLv2)

● user-1000.slice - User Slice of UID 1000
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Thu 2026-01-22 12:50:30 UTC; 2h 55min ago
      Until: Thu 2026-01-22 12:50:30 UTC; 2h 55min ago
       Docs: man:user@.service(5)
         IO: 581.4M read, 6.2G written
      Tasks: 24 (limit: 20031)
     Memory: 1.3G (peak: 3.3G)
        CPU: 19min 27.252s
     CGroup: /user.slice/user-1000.slice
             ├─session-1.scope
             │ └─4517 /usr/bin/python3
             ├─session-51.scope
             │ ├─291623 "sshd-session: zuul [priv]"
             │ ├─291627 "sshd-session: zuul@notty"
             │ ├─291628 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ ├─291652 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ ├─297596 timeout 15s turbostat --debug sleep 10
             │ ├─298091 timeout 300s systemctl status --all
             │ ├─298093 systemctl status --all
             │ ├─298388 timeout 300s ceph pg dump --format json-pretty
             │ ├─298389 /usr/bin/python3 -s /usr/bin/ceph pg dump --format json-pretty
             │ ├─298392 timeout --foreground 300s virsh -r nodeinfo
             │ └─298393 virsh -r nodeinfo
             └─user@1000.service
               ├─app.slice
               │ └─dbus-broker.service
               │   ├─11936 /usr/bin/dbus-broker-launch --scope user
               │   └─11948 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               ├─init.scope
               │ ├─4305 /usr/lib/systemd/systemd --user
               │ └─4307 "(sd-pam)"
               └─user.slice
                 └─podman-pause-3b1c51bd.scope
                   └─11838 catatonit -P

Jan 22 13:55:25 compute-2 podman[226649]: 2026-01-22 13:55:25.034272671 +0000 UTC m=+0.478758184 container start 384311074c185cc2bd08af1e04f8bece9d73e2ea32d868979213354237efbac4 (image=quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified, name=nova_compute_init, managed_by=edpm_ansible, org.label-schema.build-date=20251202, org.label-schema.schema-version=1.0, tcib_managed=true, config_data={'image': 'quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified', 'privileged': False, 'user': 'root', 'restart': 'never', 'command': 'bash -c $* -- eval python3 /sbin/nova_statedir_ownership.py | logger -t nova_compute_init', 'net': 'none', 'security_opt': ['label=disable'], 'detach': False, 'environment': {'NOVA_STATEDIR_OWNERSHIP_SKIP': '/var/lib/nova/compute_id', '__OS_DEBUG': False}, 'volumes': ['/dev/log:/dev/log', '/var/lib/nova:/var/lib/nova:shared', '/var/lib/_nova_secontext:/var/lib/_nova_secontext:shared,z', '/var/lib/openstack/config/nova/nova_statedir_ownership.py:/sbin/nova_statedir_ownership.py:z']}, maintainer=OpenStack Kubernetes Operator team, org.label-schema.name=CentOS Stream 9 Base Image, container_name=nova_compute_init, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, tcib_build_tag=c3923531bcda0b0811b2d5053f189beb, config_id=edpm, io.buildah.version=1.41.3)
Jan 22 13:55:25 compute-2 python3.9[226618]: ansible-containers.podman.podman_container PODMAN-CONTAINER-DEBUG: podman start nova_compute_init
Jan 22 13:55:25 compute-2 sudo[226616]: pam_unix(sudo:session): session closed for user root
Jan 22 13:55:25 compute-2 sshd-session[202253]: Connection closed by 192.168.122.30 port 59414
Jan 22 13:55:25 compute-2 sshd-session[202250]: pam_unix(sshd:session): session closed for user zuul
Jan 22 15:45:29 compute-2 sudo[291628]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Jan 22 15:45:29 compute-2 sudo[291628]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Jan 22 15:46:01 compute-2 ovs-appctl[296364]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Jan 22 15:46:01 compute-2 ovs-appctl[296374]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Jan 22 15:46:01 compute-2 ovs-appctl[296378]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

● user-42477.slice - User Slice of UID 42477
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Thu 2026-01-22 13:33:33 UTC; 2h 12min ago
      Until: Thu 2026-01-22 13:33:33 UTC; 2h 12min ago
       Docs: man:user@.service(5)
         IO: 296.0K read, 1.9G written
      Tasks: 26 (limit: 20031)
     Memory: 1.4G (peak: 1.7G)
        CPU: 5min 3.347s
     CGroup: /user.slice/user-42477.slice
             ├─session-20.scope
             │ ├─72606 "sshd-session: ceph-admin [priv]"
             │ └─72628 "sshd-session: ceph-admin"
             ├─session-22.scope
             │ ├─72623 "sshd-session: ceph-admin [priv]"
             │ └─72629 "sshd-session: ceph-admin@notty"
             ├─session-23.scope
             │ ├─72680 "sshd-session: ceph-admin [priv]"
             │ └─72683 "sshd-session: ceph-admin@notty"
             ├─session-24.scope
             │ ├─72734 "sshd-session: ceph-admin [priv]"
             │ └─72737 "sshd-session: ceph-admin@notty"
             ├─session-25.scope
             │ ├─72788 "sshd-session: ceph-admin [priv]"
             │ └─72791 "sshd-session: ceph-admin@notty"
             ├─session-26.scope
             │ ├─72842 "sshd-session: ceph-admin [priv]"
             │ └─72845 "sshd-session: ceph-admin@notty"
             ├─session-27.scope
             │ ├─72896 "sshd-session: ceph-admin [priv]"
             │ └─72899 "sshd-session: ceph-admin@notty"
             ├─session-28.scope
             │ ├─72950 "sshd-session: ceph-admin [priv]"
             │ └─72953 "sshd-session: ceph-admin@notty"
             ├─session-29.scope
             │ ├─73004 "sshd-session: ceph-admin [priv]"
             │ └─73007 "sshd-session: ceph-admin@notty"
             ├─session-30.scope
             │ ├─73058 "sshd-session: ceph-admin [priv]"
             │ └─73061 "sshd-session: ceph-admin@notty"
             ├─session-31.scope
             │ ├─73085 "sshd-session: ceph-admin [priv]"
             │ └─73088 "sshd-session: ceph-admin@notty"
             ├─session-32.scope
             │ ├─73139 "sshd-session: ceph-admin [priv]"
             │ └─73142 "sshd-session: ceph-admin@notty"
             └─user@42477.service
               └─init.scope
                 ├─72610 /usr/lib/systemd/systemd --user
                 └─72612 "(sd-pam)"

Jan 22 15:45:39 compute-2 sudo[292367]: pam_unix(sudo:session): session closed for user root
Jan 22 15:45:39 compute-2 sudo[292414]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 22 15:45:39 compute-2 sudo[292414]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 22 15:45:39 compute-2 sudo[292414]: pam_unix(sudo:session): session closed for user root
Jan 22 15:45:59 compute-2 sudo[295523]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 22 15:45:59 compute-2 sudo[295523]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 22 15:45:59 compute-2 sudo[295523]: pam_unix(sudo:session): session closed for user root
Jan 22 15:45:59 compute-2 sudo[295565]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Jan 22 15:45:59 compute-2 sudo[295565]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Jan 22 15:45:59 compute-2 sudo[295565]: pam_unix(sudo:session): session closed for user root

● user.slice - User and Session Slice
     Loaded: loaded (/usr/lib/systemd/system/user.slice; static)
     Active: active since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd.special(7)
         IO: 581.7M read, 8.2G written
      Tasks: 51
     Memory: 2.8G (peak: 4.9G)
        CPU: 24min 30.875s
     CGroup: /user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4517 /usr/bin/python3
             │ ├─session-51.scope
             │ │ ├─291623 "sshd-session: zuul [priv]"
             │ │ ├─291627 "sshd-session: zuul@notty"
             │ │ ├─291628 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─291652 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─297596 timeout 15s turbostat --debug sleep 10
             │ │ ├─298091 timeout 300s systemctl status --all
             │ │ ├─298093 systemctl status --all
             │ │ ├─298388 timeout 300s ceph pg dump --format json-pretty
             │ │ ├─298389 /usr/bin/python3 -s /usr/bin/ceph pg dump --format json-pretty
             │ │ ├─298392 timeout --foreground 300s virsh -r nodeinfo
             │ │ └─298393 virsh -r nodeinfo
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─11936 /usr/bin/dbus-broker-launch --scope user
             │   │   └─11948 dbus-broker --log 4 --controller 9 --machine-id 85ac68c10a6e7ae08ceb898dbdca0cb5 --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4305 /usr/lib/systemd/systemd --user
             │   │ └─4307 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-3b1c51bd.scope
             │       └─11838 catatonit -P
             └─user-42477.slice
               ├─session-20.scope
               │ ├─72606 "sshd-session: ceph-admin [priv]"
               │ └─72628 "sshd-session: ceph-admin"
               ├─session-22.scope
               │ ├─72623 "sshd-session: ceph-admin [priv]"
               │ └─72629 "sshd-session: ceph-admin@notty"
               ├─session-23.scope
               │ ├─72680 "sshd-session: ceph-admin [priv]"
               │ └─72683 "sshd-session: ceph-admin@notty"
               ├─session-24.scope
               │ ├─72734 "sshd-session: ceph-admin [priv]"
               │ └─72737 "sshd-session: ceph-admin@notty"
               ├─session-25.scope
               │ ├─72788 "sshd-session: ceph-admin [priv]"
               │ └─72791 "sshd-session: ceph-admin@notty"
               ├─session-26.scope
               │ ├─72842 "sshd-session: ceph-admin [priv]"
               │ └─72845 "sshd-session: ceph-admin@notty"
               ├─session-27.scope
               │ ├─72896 "sshd-session: ceph-admin [priv]"
               │ └─72899 "sshd-session: ceph-admin@notty"
               ├─session-28.scope
               │ ├─72950 "sshd-session: ceph-admin [priv]"
               │ └─72953 "sshd-session: ceph-admin@notty"
               ├─session-29.scope
               │ ├─73004 "sshd-session: ceph-admin [priv]"
               │ └─73007 "sshd-session: ceph-admin@notty"
               ├─session-30.scope
               │ ├─73058 "sshd-session: ceph-admin [priv]"
               │ └─73061 "sshd-session: ceph-admin@notty"
               ├─session-31.scope
               │ ├─73085 "sshd-session: ceph-admin [priv]"
               │ └─73088 "sshd-session: ceph-admin@notty"
               ├─session-32.scope
               │ ├─73139 "sshd-session: ceph-admin [priv]"
               │ └─73142 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─72610 /usr/lib/systemd/systemd --user
                   └─72612 "(sd-pam)"

● dbus.socket - D-Bus System Message Bus Socket
     Loaded: loaded (/usr/lib/systemd/system/dbus.socket; enabled; preset: enabled)
     Active: active (running) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
   Triggers: ● dbus-broker.service
     Listen: /run/dbus/system_bus_socket (Stream)
     CGroup: /system.slice/dbus.socket

Jan 22 12:49:15 localhost systemd[1]: Listening on D-Bus System Message Bus Socket.

● dm-event.socket - Device-mapper event daemon FIFOs
     Loaded: loaded (/usr/lib/systemd/system/dm-event.socket; enabled; preset: enabled)
     Active: active (listening) since Thu 2026-01-22 13:23:42 UTC; 2h 22min ago
      Until: Thu 2026-01-22 13:23:42 UTC; 2h 22min ago
   Triggers: ● dm-event.service
       Docs: man:dmeventd(8)
     Listen: /run/dmeventd-server (FIFO)
             /run/dmeventd-client (FIFO)
     CGroup: /system.slice/dm-event.socket

Jan 22 13:23:42 compute-2 systemd[1]: Listening on Device-mapper event daemon FIFOs.

● iscsid.socket - Open-iSCSI iscsid Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsid.socket; enabled; preset: enabled)
     Active: active (running) since Thu 2026-01-22 13:52:38 UTC; 1h 53min ago
      Until: Thu 2026-01-22 13:52:38 UTC; 1h 53min ago
   Triggers: ● iscsid.service
       Docs: man:iscsid(8)
             man:iscsiadm(8)
     Listen: @ISCSIADM_ABSTRACT_NAMESPACE (Stream)
     CGroup: /system.slice/iscsid.socket

Jan 22 13:52:38 compute-2 systemd[1]: Listening on Open-iSCSI iscsid Socket.

○ iscsiuio.socket - Open-iSCSI iscsiuio Socket
     Loaded: loaded (/usr/lib/systemd/system/iscsiuio.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● iscsiuio.service
       Docs: man:iscsiuio(8)
     Listen: @ISCSID_UIP_ABSTRACT_NAMESPACE (Stream)

○ libvirtd-admin.socket - libvirt legacy monolithic daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-admin.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)

○ libvirtd-ro.socket - libvirt legacy monolithic daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd-ro.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)

○ libvirtd-tls.socket
     Loaded: masked (Reason: Unit libvirtd-tls.socket is masked.)
     Active: inactive (dead)

○ libvirtd.socket - libvirt legacy monolithic daemon socket
     Loaded: loaded (/usr/lib/systemd/system/libvirtd.socket; disabled; preset: disabled)
     Active: inactive (dead)
   Triggers: ● libvirtd.service
     Listen: /run/libvirt/libvirt-sock (Stream)

● lvm2-lvmpolld.socket - LVM2 poll daemon socket
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.socket; enabled; preset: enabled)
     Active: active (listening) since Thu 2026-01-22 13:23:43 UTC; 2h 22min ago
      Until: Thu 2026-01-22 13:23:43 UTC; 2h 22min ago
   Triggers: ● lvm2-lvmpolld.service
       Docs: man:lvmpolld(8)
     Listen: /run/lvm/lvmpolld.socket (Stream)
     CGroup: /system.slice/lvm2-lvmpolld.socket

Jan 22 13:23:43 compute-2 systemd[1]: Listening on LVM2 poll daemon socket.

● multipathd.socket - multipathd control socket
     Loaded: loaded (/usr/lib/systemd/system/multipathd.socket; enabled; preset: disabled)
     Active: active (running) since Thu 2026-01-22 13:53:16 UTC; 1h 52min ago
      Until: Thu 2026-01-22 13:53:16 UTC; 1h 52min ago
   Triggers: ● multipathd.service
     Listen: @/org/kernel/linux/storage/multipathd (Stream)
             /run/multipathd.socket (Stream)
     CGroup: /system.slice/multipathd.socket

Jan 22 13:53:16 compute-2 systemd[1]: Listening on multipathd control socket.

● rpcbind.socket - RPCbind Server Activation Socket
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.socket; enabled; preset: enabled)
     Active: active (running) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
   Triggers: ● rpcbind.service
     Listen: /run/rpcbind.sock (Stream)
             0.0.0.0:111 (Stream)
             0.0.0.0:111 (Datagram)
             [::]:111 (Stream)
             [::]:111 (Datagram)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 16.0K (peak: 288.0K)
        CPU: 4ms
     CGroup: /system.slice/rpcbind.socket

● sssd-kcm.socket - SSSD Kerberos Cache Manager responder socket
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.socket; enabled; preset: enabled)
     Active: active (listening) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
   Triggers: ● sssd-kcm.service
       Docs: man:sssd-kcm(8)
     Listen: /run/.heim_org.h5l.kcm-socket (Stream)
     CGroup: /system.slice/sssd-kcm.socket

Jan 22 12:49:15 localhost systemd[1]: Listening on SSSD Kerberos Cache Manager responder socket.

○ syslog.socket - Syslog Socket
     Loaded: loaded (/usr/lib/systemd/system/syslog.socket; static)
     Active: inactive (dead)
   Triggers: ● syslog.service
       Docs: man:systemd.special(7)
             https://www.freedesktop.org/wiki/Software/systemd/syslog
     Listen: /run/systemd/journal/syslog (Datagram)

● systemd-coredump.socket - Process Core Dump Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-coredump.socket; static)
     Active: active (listening) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd-coredump(8)
     Listen: /run/systemd/coredump (SequentialPacket)
   Accepted: 0; Connected: 0;
     CGroup: /system.slice/systemd-coredump.socket

● systemd-initctl.socket - initctl Compatibility Named Pipe
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.socket; static)
     Active: active (listening) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
   Triggers: ● systemd-initctl.service
       Docs: man:systemd-initctl.socket(8)
     Listen: /run/initctl (FIFO)
     CGroup: /system.slice/systemd-initctl.socket

● systemd-journald-dev-log.socket - Journal Socket (/dev/log)
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald-dev-log.socket; static)
     Active: active (running) since Thu 2026-01-22 12:49:11 UTC; 2h 57min ago
      Until: Thu 2026-01-22 12:49:11 UTC; 2h 57min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/dev-log (Datagram)
     CGroup: /system.slice/systemd-journald-dev-log.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-journald.socket - Journal Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.socket; static)
     Active: active (running) since Thu 2026-01-22 12:49:11 UTC; 2h 57min ago
      Until: Thu 2026-01-22 12:49:11 UTC; 2h 57min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/socket (Datagram)
             /run/systemd/journal/stdout (Stream)
     CGroup: /system.slice/systemd-journald.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-rfkill.socket - Load/Save RF Kill Switch Status /dev/rfkill Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.socket; static)
     Active: active (listening) since Thu 2026-01-22 12:49:16 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:16 UTC; 2h 56min ago
   Triggers: ● systemd-rfkill.service
       Docs: man:systemd-rfkill.socket(8)
     Listen: /dev/rfkill (Special)
     CGroup: /system.slice/systemd-rfkill.socket

Jan 22 12:49:16 np0005592159.novalocal systemd[1]: Listening on Load/Save RF Kill Switch Status /dev/rfkill Watch.

● systemd-udevd-control.socket - udev Control Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-control.socket; static)
     Active: active (running) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-control.socket(8)
             man:udev(7)
     Listen: /run/udev/control (SequentialPacket)
     CGroup: /system.slice/systemd-udevd-control.socket

● systemd-udevd-kernel.socket - udev Kernel Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-kernel.socket; static)
     Active: active (running) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-kernel.socket(8)
             man:udev(7)
     Listen: kobject-uevent 1 (Netlink)
     CGroup: /system.slice/systemd-udevd-kernel.socket

○ virtinterfaced-admin.socket - libvirt interface daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-admin-sock (Stream)

○ virtinterfaced-ro.socket - libvirt interface daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock-ro (Stream)

○ virtinterfaced.socket - libvirt interface daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtinterfaced.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtinterfaced.service
     Listen: /run/libvirt/virtinterfaced-sock (Stream)

○ virtlockd-admin.socket - libvirt locking daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-admin-sock (Stream)

● virtlockd.socket - libvirt locking daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlockd.socket; enabled; preset: enabled)
     Active: active (listening) since Thu 2026-01-22 13:51:24 UTC; 1h 54min ago
      Until: Thu 2026-01-22 13:51:24 UTC; 1h 54min ago
   Triggers: ● virtlockd.service
     Listen: /run/libvirt/virtlockd-sock (Stream)
     CGroup: /system.slice/virtlockd.socket

Jan 22 13:51:24 compute-2 systemd[1]: Listening on libvirt locking daemon socket.

● virtlogd-admin.socket - libvirt logging daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd-admin.socket.d
             └─override.conf
     Active: active (running) since Thu 2026-01-22 13:51:21 UTC; 1h 54min ago
      Until: Thu 2026-01-22 13:51:21 UTC; 1h 54min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtlogd-admin.socket

Jan 22 13:51:21 compute-2 systemd[1]: Starting libvirt logging daemon admin socket...
Jan 22 13:51:21 compute-2 systemd[1]: Listening on libvirt logging daemon admin socket.

● virtlogd.socket - libvirt logging daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtlogd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtlogd.socket.d
             └─override.conf
     Active: active (running) since Thu 2026-01-22 13:51:21 UTC; 1h 54min ago
      Until: Thu 2026-01-22 13:51:21 UTC; 1h 54min ago
   Triggers: ● virtlogd.service
     Listen: /run/libvirt/virtlogd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtlogd.socket

Jan 22 13:51:21 compute-2 systemd[1]: Starting libvirt logging daemon socket...
Jan 22 13:51:21 compute-2 systemd[1]: Listening on libvirt logging daemon socket.

○ virtnetworkd-admin.socket - libvirt network daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-admin-sock (Stream)

○ virtnetworkd-ro.socket - libvirt network daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock-ro (Stream)

○ virtnetworkd.socket - libvirt network daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnetworkd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnetworkd.service
     Listen: /run/libvirt/virtnetworkd-sock (Stream)

● virtnodedevd-admin.socket - libvirt nodedev daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-admin.socket.d
             └─override.conf
     Active: active (running) since Thu 2026-01-22 13:51:22 UTC; 1h 54min ago
      Until: Thu 2026-01-22 13:51:22 UTC; 1h 54min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtnodedevd-admin.socket

Jan 22 13:51:22 compute-2 systemd[1]: Starting libvirt nodedev daemon admin socket...
Jan 22 13:51:22 compute-2 systemd[1]: Listening on libvirt nodedev daemon admin socket.

● virtnodedevd-ro.socket - libvirt nodedev daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd-ro.socket.d
             └─override.conf
     Active: active (running) since Thu 2026-01-22 13:51:22 UTC; 1h 54min ago
      Until: Thu 2026-01-22 13:51:22 UTC; 1h 54min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtnodedevd-ro.socket

Jan 22 13:51:22 compute-2 systemd[1]: Starting libvirt nodedev daemon read-only socket...
Jan 22 13:51:22 compute-2 systemd[1]: Listening on libvirt nodedev daemon read-only socket.

● virtnodedevd.socket - libvirt nodedev daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnodedevd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtnodedevd.socket.d
             └─override.conf
     Active: active (running) since Thu 2026-01-22 13:51:22 UTC; 1h 54min ago
      Until: Thu 2026-01-22 13:51:22 UTC; 1h 54min ago
   Triggers: ● virtnodedevd.service
     Listen: /run/libvirt/virtnodedevd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtnodedevd.socket

Jan 22 13:51:22 compute-2 systemd[1]: Starting libvirt nodedev daemon socket...
Jan 22 13:51:22 compute-2 systemd[1]: Listening on libvirt nodedev daemon socket.

○ virtnwfilterd-admin.socket - libvirt nwfilter daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-admin-sock (Stream)

○ virtnwfilterd-ro.socket - libvirt nwfilter daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock-ro (Stream)

○ virtnwfilterd.socket - libvirt nwfilter daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtnwfilterd.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtnwfilterd.service
     Listen: /run/libvirt/virtnwfilterd-sock (Stream)

● virtproxyd-admin.socket - libvirt proxy daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-admin.socket.d
             └─override.conf
     Active: active (listening) since Thu 2026-01-22 13:51:23 UTC; 1h 54min ago
      Until: Thu 2026-01-22 13:51:23 UTC; 1h 54min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtproxyd-admin.socket

Jan 22 13:51:23 compute-2 systemd[1]: Starting libvirt proxy daemon admin socket...
Jan 22 13:51:23 compute-2 systemd[1]: Listening on libvirt proxy daemon admin socket.

● virtproxyd-ro.socket - libvirt proxy daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd-ro.socket.d
             └─override.conf
     Active: active (listening) since Thu 2026-01-22 13:51:23 UTC; 1h 54min ago
      Until: Thu 2026-01-22 13:51:23 UTC; 1h 54min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtproxyd-ro.socket

Jan 22 13:51:23 compute-2 systemd[1]: Starting libvirt proxy daemon read-only socket...
Jan 22 13:51:23 compute-2 systemd[1]: Listening on libvirt proxy daemon read-only socket.

● virtproxyd-tls.socket - libvirt proxy daemon TLS IP socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd-tls.socket; enabled; preset: disabled)
     Active: active (listening) since Thu 2026-01-22 13:50:05 UTC; 1h 56min ago
      Until: Thu 2026-01-22 13:50:05 UTC; 1h 56min ago
   Triggers: ● virtproxyd.service
     Listen: [::]:16514 (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 8.0K (peak: 256.0K)
        CPU: 1ms
     CGroup: /system.slice/virtproxyd-tls.socket

Jan 22 13:50:05 compute-2 systemd[1]: Listening on libvirt proxy daemon TLS IP socket.

● virtproxyd.socket - libvirt proxy daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtproxyd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtproxyd.socket.d
             └─override.conf
     Active: active (listening) since Thu 2026-01-22 13:50:05 UTC; 1h 56min ago
      Until: Thu 2026-01-22 13:50:05 UTC; 1h 56min ago
   Triggers: ● virtproxyd.service
     Listen: /run/libvirt/libvirt-sock (Stream)
     CGroup: /system.slice/virtproxyd.socket

Jan 22 13:50:05 compute-2 systemd[1]: Listening on libvirt proxy daemon socket.

● virtqemud-admin.socket - libvirt QEMU daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-admin.socket.d
             └─override.conf
     Active: active (running) since Thu 2026-01-22 13:51:25 UTC; 1h 54min ago
      Until: Thu 2026-01-22 13:51:25 UTC; 1h 54min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtqemud-admin.socket

Jan 22 13:51:25 compute-2 systemd[1]: Starting libvirt QEMU daemon admin socket...
Jan 22 13:51:25 compute-2 systemd[1]: Listening on libvirt QEMU daemon admin socket.

● virtqemud-ro.socket - libvirt QEMU daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud-ro.socket.d
             └─override.conf
     Active: active (running) since Thu 2026-01-22 13:51:25 UTC; 1h 54min ago
      Until: Thu 2026-01-22 13:51:25 UTC; 1h 54min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 2ms
     CGroup: /system.slice/virtqemud-ro.socket

Jan 22 13:51:25 compute-2 systemd[1]: Starting libvirt QEMU daemon read-only socket...
Jan 22 13:51:25 compute-2 systemd[1]: Listening on libvirt QEMU daemon read-only socket.

● virtqemud.socket - libvirt QEMU daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtqemud.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtqemud.socket.d
             └─override.conf
     Active: active (running) since Thu 2026-01-22 13:51:25 UTC; 1h 54min ago
      Until: Thu 2026-01-22 13:51:25 UTC; 1h 54min ago
   Triggers: ● virtqemud.service
     Listen: /run/libvirt/virtqemud-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 4.0K (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtqemud.socket

Jan 22 13:51:24 compute-2 systemd[1]: Starting libvirt QEMU daemon socket...
Jan 22 13:51:25 compute-2 systemd[1]: Listening on libvirt QEMU daemon socket.

● virtsecretd-admin.socket - libvirt secret daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-admin.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-admin.socket.d
             └─override.conf
     Active: active (running) since Thu 2026-01-22 13:51:26 UTC; 1h 54min ago
      Until: Thu 2026-01-22 13:51:26 UTC; 1h 54min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-admin-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 404.0K)
        CPU: 6ms
     CGroup: /system.slice/virtsecretd-admin.socket

Jan 22 13:51:26 compute-2 systemd[1]: Starting libvirt secret daemon admin socket...
Jan 22 13:51:26 compute-2 systemd[1]: Listening on libvirt secret daemon admin socket.

● virtsecretd-ro.socket - libvirt secret daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd-ro.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd-ro.socket.d
             └─override.conf
     Active: active (running) since Thu 2026-01-22 13:51:26 UTC; 1h 54min ago
      Until: Thu 2026-01-22 13:51:26 UTC; 1h 54min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock-ro (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 512.0K)
        CPU: 3ms
     CGroup: /system.slice/virtsecretd-ro.socket

Jan 22 13:51:26 compute-2 systemd[1]: Starting libvirt secret daemon read-only socket...
Jan 22 13:51:26 compute-2 systemd[1]: Listening on libvirt secret daemon read-only socket.

● virtsecretd.socket - libvirt secret daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtsecretd.socket; enabled; preset: enabled)
    Drop-In: /etc/systemd/system/virtsecretd.socket.d
             └─override.conf
     Active: active (running) since Thu 2026-01-22 13:51:26 UTC; 1h 54min ago
      Until: Thu 2026-01-22 13:51:26 UTC; 1h 54min ago
   Triggers: ● virtsecretd.service
     Listen: /run/libvirt/virtsecretd-sock (Stream)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48560)
     Memory: 0B (peak: 580.0K)
        CPU: 5ms
     CGroup: /system.slice/virtsecretd.socket

Jan 22 13:51:26 compute-2 systemd[1]: Starting libvirt secret daemon socket...
Jan 22 13:51:26 compute-2 systemd[1]: Listening on libvirt secret daemon socket.

○ virtstoraged-admin.socket - libvirt storage daemon admin socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-admin.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-admin-sock (Stream)

○ virtstoraged-ro.socket - libvirt storage daemon read-only socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged-ro.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock-ro (Stream)

○ virtstoraged.socket - libvirt storage daemon socket
     Loaded: loaded (/usr/lib/systemd/system/virtstoraged.socket; enabled; preset: enabled)
     Active: inactive (dead)
   Triggers: ● virtstoraged.service
     Listen: /run/libvirt/virtstoraged-sock (Stream)

● swap.swap - /swap
     Loaded: loaded (/etc/fstab; generated)
     Active: active since Thu 2026-01-22 13:25:54 UTC; 2h 20min ago
      Until: Thu 2026-01-22 13:25:54 UTC; 2h 20min ago
       What: /swap
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● basic.target - Basic System
     Loaded: loaded (/usr/lib/systemd/system/basic.target; static)
     Active: active since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
       Docs: man:systemd.special(7)

Jan 22 12:49:15 localhost systemd[1]: Reached target Basic System.

○ blockdev@dev-disk-by\x2duuid-22ac9141\x2d3960\x2d4912\x2db20e\x2d19fc8a328d40.target - Block Device Preparation for /dev/disk/by-uuid/22ac9141-3960-4912-b20e-19fc8a328d40
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ blockdev@dev-vda1.target - Block Device Preparation for /dev/vda1
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● ceph-088fe176-0106-5401-803c-2da38b73b76a.target - Ceph cluster 088fe176-0106-5401-803c-2da38b73b76a
     Loaded: loaded (/etc/systemd/system/ceph-088fe176-0106-5401-803c-2da38b73b76a.target; enabled; preset: disabled)
     Active: active since Thu 2026-01-22 13:35:28 UTC; 2h 10min ago
      Until: Thu 2026-01-22 13:35:28 UTC; 2h 10min ago

Jan 22 13:35:28 compute-2 systemd[1]: Reached target Ceph cluster 088fe176-0106-5401-803c-2da38b73b76a.

● ceph.target - All Ceph clusters and services
     Loaded: loaded (/etc/systemd/system/ceph.target; enabled; preset: disabled)
     Active: active since Thu 2026-01-22 13:35:27 UTC; 2h 10min ago
      Until: Thu 2026-01-22 13:35:27 UTC; 2h 10min ago

Jan 22 13:35:27 compute-2 systemd[1]: Reached target All Ceph clusters and services.

● cloud-config.target - Cloud-config availability
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.target; static)
     Active: active since Thu 2026-01-22 12:49:18 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:18 UTC; 2h 56min ago

Jan 22 12:49:18 np0005592159.novalocal systemd[1]: Reached target Cloud-config availability.

● cloud-init.target - Cloud-init target
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.target; enabled-runtime; preset: disabled)
     Active: active since Thu 2026-01-22 12:49:19 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:19 UTC; 2h 56min ago

Jan 22 12:49:19 np0005592159.novalocal systemd[1]: Reached target Cloud-init target.

○ cryptsetup-pre.target - Local Encrypted Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● cryptsetup.target - Local Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup.target; static)
     Active: active since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd.special(7)

● edpm_libvirt.target
     Loaded: loaded (/etc/systemd/system/edpm_libvirt.target; static)
     Active: active since Thu 2026-01-22 13:52:05 UTC; 1h 54min ago
      Until: Thu 2026-01-22 13:52:05 UTC; 1h 54min ago

Jan 22 13:52:05 compute-2 systemd[1]: Reached target edpm_libvirt.target.

○ emergency.target - Emergency Mode
     Loaded: loaded (/usr/lib/systemd/system/emergency.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ first-boot-complete.target - First Boot Complete
     Loaded: loaded (/usr/lib/systemd/system/first-boot-complete.target; static)
     Active: inactive (dead)
  Condition: start condition failed at Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd.special(7)

Jan 22 12:49:14 localhost systemd[1]: First Boot Complete was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ getty-pre.target - Preparation for Logins
     Loaded: loaded (/usr/lib/systemd/system/getty-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

● getty.target - Login Prompts
     Loaded: loaded (/usr/lib/systemd/system/getty.target; static)
     Active: active since Thu 2026-01-22 12:49:18 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:18 UTC; 2h 56min ago
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

Jan 22 12:49:18 np0005592159.novalocal systemd[1]: Reached target Login Prompts.

○ graphical.target - Graphical Interface
     Loaded: loaded (/usr/lib/systemd/system/graphical.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ initrd-fs.target - Initrd File Systems
     Loaded: loaded (/usr/lib/systemd/system/initrd-fs.target; static)
     Active: inactive (dead) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd.special(7)

Jan 22 12:49:13 localhost systemd[1]: Reached target Initrd File Systems.

○ initrd-root-device.target - Initrd Root Device
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-device.target; static)
     Active: inactive (dead) since Thu 2026-01-22 12:49:13 UTC; 2h 57min ago
       Docs: man:systemd.special(7)

Jan 22 12:49:12 localhost systemd[1]: Reached target Initrd Root Device.
Jan 22 12:49:13 localhost systemd[1]: Stopped target Initrd Root Device.

○ initrd-root-fs.target - Initrd Root File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-fs.target; static)
     Active: inactive (dead) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd.special(7)

Jan 22 12:49:13 localhost systemd[1]: Reached target Initrd Root File System.

○ initrd-switch-root.target - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.target; static)
     Active: inactive (dead) since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago

Jan 22 12:49:13 localhost systemd[1]: Reached target Switch Root.

○ initrd-usr-fs.target - Initrd /usr File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-usr-fs.target; static)
     Active: inactive (dead) since Thu 2026-01-22 12:49:13 UTC; 2h 57min ago
       Docs: man:systemd.special(7)

Jan 22 12:49:13 localhost systemd[1]: Stopped target Initrd /usr File System.
Notice: journal has been rotated since unit was started, output may be incomplete.

○ initrd.target - Initrd Default Target
     Loaded: loaded (/usr/lib/systemd/system/initrd.target; static)
     Active: inactive (dead) since Thu 2026-01-22 12:49:13 UTC; 2h 57min ago
       Docs: man:systemd.special(7)

Jan 22 12:49:13 localhost systemd[1]: Reached target Initrd Default Target.
Jan 22 12:49:13 localhost systemd[1]: Stopped target Initrd Default Target.

● integritysetup.target - Local Integrity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/integritysetup.target; static)
     Active: active since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd.special(7)

● local-fs-pre.target - Preparation for Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs-pre.target; static)
     Active: active since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd.special(7)

Jan 22 12:49:14 localhost systemd[1]: Reached target Preparation for Local File Systems.

● local-fs.target - Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs.target; static)
     Active: active since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd.special(7)

Jan 22 12:49:14 localhost systemd[1]: Reached target Local File Systems.

● multi-user.target - Multi-User System
     Loaded: loaded (/usr/lib/systemd/system/multi-user.target; indirect; preset: disabled)
     Active: active since Thu 2026-01-22 12:49:18 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:18 UTC; 2h 56min ago
       Docs: man:systemd.special(7)

Jan 22 12:49:18 np0005592159.novalocal systemd[1]: Reached target Multi-User System.

● network-online.target - Network is Online
     Loaded: loaded (/usr/lib/systemd/system/network-online.target; static)
     Active: active since Thu 2026-01-22 12:49:18 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:18 UTC; 2h 56min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Jan 22 12:49:18 np0005592159.novalocal systemd[1]: Reached target Network is Online.

● network-pre.target - Preparation for Network
     Loaded: loaded (/usr/lib/systemd/system/network-pre.target; static)
     Active: active since Thu 2026-01-22 12:49:16 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:16 UTC; 2h 56min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Jan 22 12:49:16 np0005592159.novalocal systemd[1]: Reached target Preparation for Network.

● network.target - Network
     Loaded: loaded (/usr/lib/systemd/system/network.target; static)
     Active: active since Thu 2026-01-22 12:49:16 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:16 UTC; 2h 56min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Jan 22 12:49:16 np0005592159.novalocal systemd[1]: Reached target Network.

● nfs-client.target - NFS client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-client.target; enabled; preset: disabled)
     Active: active since Thu 2026-01-22 12:49:16 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:16 UTC; 2h 56min ago

Jan 22 12:49:16 np0005592159.novalocal systemd[1]: Reached target NFS client services.

○ nss-lookup.target - Host and Network Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-lookup.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● nss-user-lookup.target - User and Group Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-user-lookup.target; static)
     Active: active since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
       Docs: man:systemd.special(7)

Jan 22 12:49:15 localhost systemd[1]: Reached target User and Group Name Lookups.

● paths.target - Path Units
     Loaded: loaded (/usr/lib/systemd/system/paths.target; static)
     Active: active since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd.special(7)

○ remote-cryptsetup.target - Remote Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-cryptsetup.target; disabled; preset: enabled)
     Active: inactive (dead) since Thu 2026-01-22 12:49:13 UTC; 2h 57min ago
Unit syslog.target could not be found.
       Docs: man:systemd.special(7)

Jan 22 12:49:12 localhost systemd[1]: Reached target Remote Encrypted Volumes.
Jan 22 12:49:13 localhost systemd[1]: Stopped target Remote Encrypted Volumes.

● remote-fs-pre.target - Preparation for Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs-pre.target; static)
     Active: active since Thu 2026-01-22 12:49:16 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:16 UTC; 2h 56min ago
       Docs: man:systemd.special(7)

Jan 22 12:49:16 np0005592159.novalocal systemd[1]: Reached target Preparation for Remote File Systems.

● remote-fs.target - Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs.target; enabled; preset: enabled)
     Active: active since Thu 2026-01-22 12:49:16 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:16 UTC; 2h 56min ago
       Docs: man:systemd.special(7)

Jan 22 12:49:16 np0005592159.novalocal systemd[1]: Reached target Remote File Systems.

○ remote-veritysetup.target - Remote Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-veritysetup.target; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ rescue.target - Rescue Mode
     Loaded: loaded (/usr/lib/systemd/system/rescue.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● rpc_pipefs.target
     Loaded: loaded (/usr/lib/systemd/system/rpc_pipefs.target; static)
     Active: active since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago

● rpcbind.target - RPC Port Mapper
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.target; static)
     Active: active since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd.special(7)

○ shutdown.target - System Shutdown
     Loaded: loaded (/usr/lib/systemd/system/shutdown.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● slices.target - Slice Units
     Loaded: loaded (/usr/lib/systemd/system/slices.target; static)
     Active: active since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd.special(7)

● sockets.target - Socket Units
     Loaded: loaded (/usr/lib/systemd/system/sockets.target; static)
     Active: active since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
       Docs: man:systemd.special(7)

Jan 22 12:49:15 localhost systemd[1]: Reached target Socket Units.

● sshd-keygen.target
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen.target; static)
     Active: active since Thu 2026-01-22 13:49:31 UTC; 1h 56min ago
      Until: Thu 2026-01-22 13:49:31 UTC; 1h 56min ago

Jan 22 13:49:31 compute-2 systemd[1]: Reached target sshd-keygen.target.

● swap.target - Swaps
     Loaded: loaded (/usr/lib/systemd/system/swap.target; static)
     Active: active since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd.special(7)

● sysinit.target - System Initialization
     Loaded: loaded (/usr/lib/systemd/system/sysinit.target; static)
     Active: active since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
       Docs: man:systemd.special(7)

Jan 22 12:49:15 localhost systemd[1]: Reached target System Initialization.

● time-set.target - System Time Set
     Loaded: loaded (/usr/lib/systemd/system/time-set.target; static)
     Active: active since Thu 2026-01-22 13:35:28 UTC; 2h 10min ago
      Until: Thu 2026-01-22 13:35:28 UTC; 2h 10min ago
       Docs: man:systemd.special(7)

Jan 22 13:35:28 compute-2 systemd[1]: Reached target System Time Set.

● time-sync.target - System Time Synchronized
     Loaded: loaded (/usr/lib/systemd/system/time-sync.target; static)
     Active: active since Thu 2026-01-22 13:35:28 UTC; 2h 10min ago
      Until: Thu 2026-01-22 13:35:28 UTC; 2h 10min ago
       Docs: man:systemd.special(7)

Jan 22 13:35:28 compute-2 systemd[1]: Reached target System Time Synchronized.

● timers.target - Timer Units
     Loaded: loaded (/usr/lib/systemd/system/timers.target; static)
     Active: active since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
       Docs: man:systemd.special(7)

Jan 22 12:49:15 localhost systemd[1]: Reached target Timer Units.

○ umount.target - Unmount All Filesystems
     Loaded: loaded (/usr/lib/systemd/system/umount.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ veritysetup-pre.target - Local Verity Protected Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/veritysetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● veritysetup.target - Local Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/veritysetup.target; static)
     Active: active since Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:14 UTC; 2h 56min ago
       Docs: man:systemd.special(7)

○ virt-guest-shutdown.target - Libvirt guests shutdown
     Loaded: loaded (/etc/systemd/system/virt-guest-shutdown.target; static)
     Active: inactive (dead)
       Docs: https://libvirt.org

● 65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d-4facdc8abbad11c7.timer - /usr/bin/podman healthcheck run 65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d
     Loaded: loaded (/run/systemd/transient/65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d-4facdc8abbad11c7.timer; transient)
  Transient: yes
     Active: active (waiting) since Thu 2026-01-22 13:46:45 UTC; 1h 59min ago
      Until: Thu 2026-01-22 13:46:45 UTC; 1h 59min ago
    Trigger: Thu 2026-01-22 15:46:38 UTC; 24s left
   Triggers: ● 65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d-4facdc8abbad11c7.service

Jan 22 13:46:45 compute-2 systemd[1]: Started /usr/bin/podman healthcheck run 65cda04b9c9e71d648ab5510147314c4de15a37ca8d4a48196c50c9ad6ccb44d.

● 8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356-44e4d69ad703dadb.timer - /usr/bin/podman healthcheck run 8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356
     Loaded: loaded (/run/systemd/transient/8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356-44e4d69ad703dadb.timer; transient)
  Transient: yes
     Active: active (waiting) since Thu 2026-01-22 13:45:27 UTC; 2h 0min ago
      Until: Thu 2026-01-22 13:45:27 UTC; 2h 0min ago
    Trigger: Thu 2026-01-22 15:46:27 UTC; 13s left
   Triggers: ● 8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356-44e4d69ad703dadb.service

Jan 22 13:45:27 compute-2 systemd[1]: Started /usr/bin/podman healthcheck run 8eec14eed05eebd169934b14ad23738a7c696fad3b7be75ce9a652966539c356.

● dnf-makecache.timer - dnf makecache --timer
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.timer; enabled; preset: enabled)
     Active: active (waiting) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
    Trigger: Thu 2026-01-22 16:18:49 UTC; 32min left
   Triggers: ● dnf-makecache.service

Jan 22 12:49:15 localhost systemd[1]: Started dnf makecache --timer.

● logrotate.timer - Daily rotation of log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.timer; enabled; preset: enabled)
     Active: active (waiting) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
    Trigger: Fri 2026-01-23 00:00:00 UTC; 8h left
   Triggers: ● logrotate.service
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

Jan 22 12:49:15 localhost systemd[1]: Started Daily rotation of log files.

○ raid-check.timer - Weekly RAID setup health check
     Loaded: loaded (/usr/lib/systemd/system/raid-check.timer; enabled; preset: enabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● raid-check.service

○ sysstat-collect.timer - Run system activity accounting tool every 10 minutes
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-collect.service

○ sysstat-summary.timer - Generate summary of yesterday's process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-summary.service

● systemd-tmpfiles-clean.timer - Daily Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.timer; static)
     Active: active (waiting) since Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
      Until: Thu 2026-01-22 12:49:15 UTC; 2h 56min ago
    Trigger: Fri 2026-01-23 13:04:25 UTC; 21h left
   Triggers: ● systemd-tmpfiles-clean.service
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)

Jan 22 12:49:15 localhost systemd[1]: Started Daily Cleanup of Temporary Directories.

● unbound-anchor.timer - daily update of the root trust anchor for DNSSEC
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.timer; enabled; preset: enabled)
     Active: active (waiting) since Thu 2026-01-22 13:26:26 UTC; 2h 19min ago
      Until: Thu 2026-01-22 13:26:26 UTC; 2h 19min ago
    Trigger: Fri 2026-01-23 00:00:00 UTC; 8h left
   Triggers: ● unbound-anchor.service
       Docs: man:unbound-anchor(8)

Jan 22 13:26:26 compute-2 systemd[1]: Started daily update of the root trust anchor for DNSSEC.
