● compute-0
    State: running
    Units: 386 loaded (incl. loaded aliases)
     Jobs: 0 queued
   Failed: 0 units
    Since: Tue 2025-11-25 09:43:53 UTC; 39min ago
  systemd: 252-59.el9
   CGroup: /
           ├─108356 turbostat --debug sleep 10
           ├─108359 sleep 10
           ├─init.scope
           │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
           ├─system.slice
           │ ├─NetworkManager.service
           │ │ └─49043 /usr/sbin/NetworkManager --no-daemon
           │ ├─auditd.service
           │ │ ├─700 /sbin/auditd
           │ │ └─702 /usr/sbin/sedispatch
           │ ├─chronyd.service
           │ │ └─58618 /usr/sbin/chronyd -F 2
           │ ├─crond.service
           │ │ ├─ 1012 /usr/sbin/crond -n
           │ │ └─30899 /usr/sbin/anacron -s
           │ ├─dbus-broker.service
           │ │ ├─810 /usr/bin/dbus-broker-launch --scope system --audit
           │ │ └─811 dbus-broker --log 4 --controller 9 --machine-id fee38d0f94bf6f4b17ec77ba536bd6ab --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
           │ ├─gssproxy.service
           │ │ └─885 /usr/sbin/gssproxy -D
           │ ├─irqbalance.service
           │ │ └─816 /usr/sbin/irqbalance
           │ ├─ovs-vswitchd.service
           │ │ └─47347 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
           │ ├─ovsdb-server.service
           │ │ └─47265 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
           │ ├─polkit.service
           │ │ └─43525 /usr/lib/polkit-1/polkitd --no-debug
           │ ├─rpcbind.service
           │ │ └─698 /usr/bin/rpcbind -w -f
           │ ├─rsyslog.service
           │ │ └─1008 /usr/sbin/rsyslogd -n
           │ ├─sshd.service
           │ │ └─1009 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
           │ ├─system-ceph\x2d86651951\x2dd7ad\x2d54ed\x2d8d1a\x2d1eae0c6b599c.slice
           │ │ ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@crash.compute-0.service
           │ │ │ ├─libpod-payload-b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f
           │ │ │ │ ├─82984 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
           │ │ │ │ └─82986 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
           │ │ │ └─runtime
           │ │ │   └─82982 /usr/bin/conmon --api-version 1 -c b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f -u b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f/userdata -p /run/containers/storage/overlay-containers/b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f
           │ │ ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mds.cephfs.compute-0.avwmrm.service
           │ │ │ ├─libpod-payload-ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270
           │ │ │ │ ├─101346 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.avwmrm -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─101348 /usr/bin/ceph-mds -n mds.cephfs.compute-0.avwmrm -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─101344 /usr/bin/conmon --api-version 1 -c ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270 -u ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270/userdata -p /run/containers/storage/overlay-containers/ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-mds-cephfs-compute-0-avwmrm --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mds.cephfs.compute-0.avwmrm.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270
           │ │ ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mgr.compute-0.oomwtk.service
           │ │ │ ├─libpod-payload-50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5
           │ │ │ │ ├─75454 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.oomwtk -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─75456 /usr/bin/ceph-mgr -n mgr.compute-0.oomwtk -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─75452 /usr/bin/conmon --api-version 1 -c 50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5 -u 50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5/userdata -p /run/containers/storage/overlay-containers/50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-mgr-compute-0-oomwtk --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mgr.compute-0.oomwtk.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5
           │ │ ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mon.compute-0.service
           │ │ │ ├─libpod-payload-da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5
           │ │ │ │ ├─75164 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ │ └─75166 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─75162 /usr/bin/conmon --api-version 1 -c da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5 -u da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5/userdata -p /run/containers/storage/overlay-containers/da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5
           │ │ ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.0.service
           │ │ │ ├─libpod-payload-509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c
           │ │ │ │ ├─88858 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─88860 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─88856 /usr/bin/conmon --api-version 1 -c 509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c -u 509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c/userdata -p /run/containers/storage/overlay-containers/509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c
           │ │ ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.1.service
           │ │ │ ├─libpod-payload-53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3
           │ │ │ │ ├─89879 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─89881 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─89877 /usr/bin/conmon --api-version 1 -c 53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3 -u 53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3/userdata -p /run/containers/storage/overlay-containers/53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3
           │ │ ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.2.service
           │ │ │ ├─libpod-payload-a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181
           │ │ │ │ ├─90887 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ │ └─90889 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │ │ └─runtime
           │ │ │   └─90885 /usr/bin/conmon --api-version 1 -c a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181 -u a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181/userdata -p /run/containers/storage/overlay-containers/a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181
           │ │ └─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@rgw.rgw.compute-0.nmzhvu.service
           │ │   ├─libpod-payload-37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6
           │ │   │ ├─100884 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.nmzhvu -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │   │ └─100886 /usr/bin/radosgw -n client.rgw.rgw.compute-0.nmzhvu -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
           │ │   └─runtime
           │ │     └─100882 /usr/bin/conmon --api-version 1 -c 37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6 -u 37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6/userdata -p /run/containers/storage/overlay-containers/37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-rgw-rgw-compute-0-nmzhvu --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@rgw.rgw.compute-0.nmzhvu.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6
           │ ├─system-getty.slice
           │ │ └─getty@tty1.service
           │ │   └─1013 /sbin/agetty -o "-p -- \\u" --noclear - linux
           │ ├─system-serial\x2dgetty.slice
           │ │ └─serial-getty@ttyS0.service
           │ │   └─2566 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
           │ ├─systemd-hostnamed.service
           │ │ └─106828 /usr/lib/systemd/systemd-hostnamed
           │ ├─systemd-journald.service
           │ │ └─678 /usr/lib/systemd/systemd-journald
           │ ├─systemd-logind.service
           │ │ └─820 /usr/lib/systemd/systemd-logind
           │ ├─systemd-udevd.service
           │ │ └─udev
           │ │   └─728 /usr/lib/systemd/systemd-udevd
           │ └─tuned.service
           │   └─43704 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
           └─user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4518 /usr/bin/python3
             │ ├─session-17.scope
             │ │ ├─71412 "sshd-session: zuul [priv]"
             │ │ └─71415 "sshd-session: zuul@notty"
             │ ├─session-33.scope
             │ │ ├─105525 "sshd-session: zuul [priv]"
             │ │ ├─105528 "sshd-session: zuul@notty"
             │ │ ├─105529 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─105553 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─108355 timeout 15s turbostat --debug sleep 10
             │ │ ├─108758 timeout 300s semanage node -l
             │ │ ├─108759 /usr/bin/python3 -EsI /usr/sbin/semanage node -l
             │ │ ├─108762 timeout 300s systemctl status --all
             │ │ └─108763 systemctl status --all
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─12489 /usr/bin/dbus-broker-launch --scope user
             │   │   └─12506 dbus-broker --log 4 --controller 9 --machine-id fee38d0f94bf6f4b17ec77ba536bd6ab --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4307 /usr/lib/systemd/systemd --user
             │   │ └─4309 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-9f2ee473.scope
             │       └─12394 catatonit -P
             └─user-42477.slice
               ├─session-20.scope
               │ ├─76729 "sshd-session: ceph-admin [priv]"
Unit boot.automount could not be found.
                │ └─76752 "sshd-session: ceph-admin"
               ├─session-22.scope
               │ ├─76747 "sshd-session: ceph-admin [priv]"
               │ └─76753 "sshd-session: ceph-admin@notty"
               ├─session-23.scope
               │ ├─76804 "sshd-session: ceph-admin [priv]"
               │ └─76807 "sshd-session: ceph-admin@notty"
               ├─session-24.scope
               │ ├─76858 "sshd-session: ceph-admin [priv]"
               │ └─76861 "sshd-session: ceph-admin@notty"
               ├─session-25.scope
               │ ├─76912 "sshd-session: ceph-admin [priv]"
               │ └─76915 "sshd-session: ceph-admin@notty"
               ├─session-26.scope
               │ ├─76966 "sshd-session: ceph-admin [priv]"
               │ └─76969 "sshd-session: ceph-admin@notty"
               ├─session-27.scope
               │ ├─77020 "sshd-session: ceph-admin [priv]"
               │ └─77023 "sshd-session: ceph-admin@notty"
               ├─session-28.scope
               │ ├─77074 "sshd-session: ceph-admin [priv]"
               │ └─77077 "sshd-session: ceph-admin@notty"
               ├─session-29.scope
               │ ├─77128 "sshd-session: ceph-admin [priv]"
               │ └─77131 "sshd-session: ceph-admin@notty"
               ├─session-30.scope
               │ ├─77182 "sshd-session: ceph-admin [priv]"
               │ └─77185 "sshd-session: ceph-admin@notty"
               ├─session-31.scope
               │ ├─77209 "sshd-session: ceph-admin [priv]"
               │ └─77212 "sshd-session: ceph-admin@notty"
               ├─session-32.scope
               │ ├─77263 "sshd-session: ceph-admin [priv]"
               │ └─77266 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─76733 /usr/lib/systemd/systemd --user
                   └─76736 "(sd-pam)"

● proc-sys-fs-binfmt_misc.automount - Arbitrary Executable File Formats File System Automount Point
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.automount; static)
     Active: active (running) since Tue 2025-11-25 09:43:54 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:54 UTC; 39min ago
   Triggers: ● proc-sys-fs-binfmt_misc.mount
      Where: /proc/sys/fs/binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Nov 25 10:19:41 compute-0 systemd[1]: proc-sys-fs-binfmt_misc.automount: Got automount request for /proc/sys/fs/binfmt_misc, triggered by 78150 (sysctl)

● dev-cdrom.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ceph_vg0-ceph_lv0.device - /dev/ceph_vg0/ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:49 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:49 UTC; 5min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-ceph_vg1-ceph_lv1.device - /dev/ceph_vg1/ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:54 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:54 UTC; 5min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-ceph_vg2-ceph_lv2.device - /dev/ceph_vg2/ceph_lv2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:59 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:59 UTC; 5min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2ddiskseq-1.device - /dev/disk/by-diskseq/1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2ddiskseq-11.device - /dev/disk/by-diskseq/11
    Follows: unit currently follows state of sys-devices-virtual-block-loop5.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:58 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:58 UTC; 5min ago
     Device: /sys/devices/virtual/block/loop5

● dev-disk-by\x2ddiskseq-12.device - /dev/disk/by-diskseq/12
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:59 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:59 UTC; 5min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2ddiskseq-3.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2ddiskseq-5.device - /dev/disk/by-diskseq/5
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:48 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:48 UTC; 5min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2ddiskseq-6.device - /dev/disk/by-diskseq/6
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:49 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:49 UTC; 5min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2ddiskseq-8.device - /dev/disk/by-diskseq/8
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:53 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:53 UTC; 5min ago
     Device: /sys/devices/virtual/block/loop4

● dev-disk-by\x2ddiskseq-9.device - /dev/disk/by-diskseq/9
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:54 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:54 UTC; 5min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-ata\x2dQEMU_DVD\x2dROM_QM00001.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg0\x2dceph_lv0.device - /dev/disk/by-id/dm-name-ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:49 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:49 UTC; 5min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg1\x2dceph_lv1.device - /dev/disk/by-id/dm-name-ceph_vg1-ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:54 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:54 UTC; 5min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-dm\x2dname\x2dceph_vg2\x2dceph_lv2.device - /dev/disk/by-id/dm-name-ceph_vg2-ceph_lv2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:59 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:59 UTC; 5min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2demgFya1WzXAZmUPMykQ8sVfmtjnpZ131cdF7O4dve289CQIhMwlf8UhSJlZEt8EF.device - /dev/disk/by-id/dm-uuid-LVM-emgFya1WzXAZmUPMykQ8sVfmtjnpZ131cdF7O4dve289CQIhMwlf8UhSJlZEt8EF
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:59 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:59 UTC; 5min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2dFJjZOV1HDSFflkKHJBU1e1T58IJRT3tDCPeJvszRFfx1oK8qX23eM2pBbtkJLqLm.device - /dev/disk/by-id/dm-uuid-LVM-FJjZOV1HDSFflkKHJBU1e1T58IJRT3tDCPeJvszRFfx1oK8qX23eM2pBbtkJLqLm
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:49 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:49 UTC; 5min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-disk-by\x2did-dm\x2duuid\x2dLVM\x2dnwMuaNBvoyVi8kTRPXNxlq4HSi8eTIqq1YNYrmv3MBiGyCNehOyCbkjZvEfdeC1G.device - /dev/disk/by-id/dm-uuid-LVM-nwMuaNBvoyVi8kTRPXNxlq4HSi8eTIqq1YNYrmv3MBiGyCNehOyCbkjZvEfdeC1G
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:54 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:54 UTC; 5min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dETocio\x2dXqsy\x2d1VL1\x2dCl8g\x2dHA7x\x2dQejM\x2d0vZTti.device - /dev/disk/by-id/lvm-pv-uuid-ETocio-Xqsy-1VL1-Cl8g-HA7x-QejM-0vZTti
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:49 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:49 UTC; 5min ago
     Device: /sys/devices/virtual/block/loop3

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dfPguzr\x2dQxjA\x2dnNaw\x2dUWsc\x2ds7rV\x2dvexf\x2d8TSzDR.device - /dev/disk/by-id/lvm-pv-uuid-fPguzr-QxjA-nNaw-UWsc-s7rV-vexf-8TSzDR
    Follows: unit currently follows state of sys-devices-virtual-block-loop5.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:59 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:59 UTC; 5min ago
     Device: /sys/devices/virtual/block/loop5

● dev-disk-by\x2did-lvm\x2dpv\x2duuid\x2dwJZ1mT\x2d6vQf\x2dafLU\x2ddqav\x2dzLLx\x2d0040\x2dpLTAld.device - /dev/disk/by-id/lvm-pv-uuid-wJZ1mT-6vQf-afLU-dqav-zLLx-0040-pLTAld
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:54 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:54 UTC; 5min ago
     Device: /sys/devices/virtual/block/loop4

● dev-disk-by\x2dlabel-config\x2d2.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpartuuid-aae2604d\x2d01.device - /dev/disk/by-partuuid/aae2604d-01
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:01.1\x2data\x2d1.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0.device - /dev/disk/by-path/pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-pci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0.device - /dev/disk/by-path/virtio-pci-0000:00:04.0
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-disk-by\x2dpath-virtio\x2dpci\x2d0000:00:04.0\x2dpart1.device - /dev/disk/by-path/virtio-pci-0000:00:04.0-part1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● dev-disk-by\x2duuid-2025\x2d11\x2d25\x2d09\x2d43\x2d38\x2d00.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-disk-by\x2duuid-47e3724e\x2d7a1b\x2d439a\x2d9543\x2db98c9a290709.device - /dev/disk/by-uuid/47e3724e-7a1b-439a-9543-b98c9a290709
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

Nov 25 09:43:51 localhost systemd[1]: Found device /dev/disk/by-uuid/47e3724e-7a1b-439a-9543-b98c9a290709.

● dev-dm\x2d0.device - /dev/dm-0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:49 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:49 UTC; 5min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-dm\x2d1.device - /dev/dm-1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:54 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:54 UTC; 5min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-dm\x2d2.device - /dev/dm-2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:59 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:59 UTC; 5min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-loop3.device - /dev/loop3
    Follows: unit currently follows state of sys-devices-virtual-block-loop3.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:48 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:48 UTC; 5min ago
     Device: /sys/devices/virtual/block/loop3

● dev-loop4.device - /dev/loop4
    Follows: unit currently follows state of sys-devices-virtual-block-loop4.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:53 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:53 UTC; 5min ago
     Device: /sys/devices/virtual/block/loop4

● dev-loop5.device - /dev/loop5
    Follows: unit currently follows state of sys-devices-virtual-block-loop5.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:58 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:58 UTC; 5min ago
     Device: /sys/devices/virtual/block/loop5

● dev-mapper-ceph_vg0\x2dceph_lv0.device - /dev/mapper/ceph_vg0-ceph_lv0
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:49 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:49 UTC; 5min ago
     Device: /sys/devices/virtual/block/dm-0

● dev-mapper-ceph_vg1\x2dceph_lv1.device - /dev/mapper/ceph_vg1-ceph_lv1
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:54 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:54 UTC; 5min ago
     Device: /sys/devices/virtual/block/dm-1

● dev-mapper-ceph_vg2\x2dceph_lv2.device - /dev/mapper/ceph_vg2-ceph_lv2
    Follows: unit currently follows state of sys-devices-virtual-block-dm\x2d2.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:59 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:59 UTC; 5min ago
     Device: /sys/devices/virtual/block/dm-2

● dev-rfkill.device - /dev/rfkill
    Follows: unit currently follows state of sys-devices-virtual-misc-rfkill.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:59 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:59 UTC; 39min ago
     Device: /sys/devices/virtual/misc/rfkill

● dev-sr0.device - QEMU_DVD-ROM config-2
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● dev-ttyS0.device - /dev/ttyS0
    Follows: unit currently follows state of sys-devices-pnp0-00:00-tty-ttyS0.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

Nov 25 09:43:56 localhost systemd[1]: Condition check resulted in /dev/ttyS0 being skipped.

● dev-ttyS1.device - /dev/ttyS1
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● dev-ttyS2.device - /dev/ttyS2
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS2.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● dev-ttyS3.device - /dev/ttyS3
    Follows: unit currently follows state of sys-devices-platform-serial8250-tty-ttyS3.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● dev-vda.device - /dev/vda
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● dev-vda1.device - /dev/vda1
    Follows: unit currently follows state of sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:01.1-ata1-host0-target0:0:0-0:0:0:0-block-sr0.device - QEMU_DVD-ROM config-2
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/pci0000:00/0000:00:01.1/ata1/host0/target0:0:0/0:0:0:0/block/sr0

● sys-devices-pci0000:00-0000:00:03.0-virtio1-net-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda-vda1.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda/vda1

● sys-devices-pci0000:00-0000:00:04.0-virtio2-block-vda.device - /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/pci0000:00/0000:00:04.0/virtio2/block/vda

● sys-devices-pci0000:00-0000:00:07.0-virtio5-net-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:46:14 UTC; 37min ago
      Until: Tue 2025-11-25 09:46:14 UTC; 37min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-devices-platform-serial8250-tty-ttyS1.device - /sys/devices/platform/serial8250/tty/ttyS1
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS1

● sys-devices-platform-serial8250-tty-ttyS2.device - /sys/devices/platform/serial8250/tty/ttyS2
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS2

● sys-devices-platform-serial8250-tty-ttyS3.device - /sys/devices/platform/serial8250/tty/ttyS3
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/platform/serial8250/tty/ttyS3

● sys-devices-pnp0-00:00-tty-ttyS0.device - /sys/devices/pnp0/00:00/tty/ttyS0
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/pnp0/00:00/tty/ttyS0

● sys-devices-virtual-block-dm\x2d0.device - /sys/devices/virtual/block/dm-0
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:49 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:49 UTC; 5min ago
     Device: /sys/devices/virtual/block/dm-0

● sys-devices-virtual-block-dm\x2d1.device - /sys/devices/virtual/block/dm-1
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:54 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:54 UTC; 5min ago
     Device: /sys/devices/virtual/block/dm-1

● sys-devices-virtual-block-dm\x2d2.device - /sys/devices/virtual/block/dm-2
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:59 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:59 UTC; 5min ago
     Device: /sys/devices/virtual/block/dm-2

● sys-devices-virtual-block-loop3.device - /sys/devices/virtual/block/loop3
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:48 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:48 UTC; 5min ago
     Device: /sys/devices/virtual/block/loop3

● sys-devices-virtual-block-loop4.device - /sys/devices/virtual/block/loop4
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:53 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:53 UTC; 5min ago
     Device: /sys/devices/virtual/block/loop4

● sys-devices-virtual-block-loop5.device - /sys/devices/virtual/block/loop5
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:17:58 UTC; 5min ago
      Until: Tue 2025-11-25 10:17:58 UTC; 5min ago
     Device: /sys/devices/virtual/block/loop5

● sys-devices-virtual-misc-rfkill.device - /sys/devices/virtual/misc/rfkill
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:59 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:59 UTC; 39min ago
     Device: /sys/devices/virtual/misc/rfkill

● sys-devices-virtual-net-br\x2dex.device - /sys/devices/virtual/net/br-ex
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:14:58 UTC; 8min ago
      Until: Tue 2025-11-25 10:14:58 UTC; 8min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-devices-virtual-net-ovs\x2dsystem.device - /sys/devices/virtual/net/ovs-system
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:14:58 UTC; 8min ago
      Until: Tue 2025-11-25 10:14:58 UTC; 8min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-devices-virtual-net-vlan20.device - /sys/devices/virtual/net/vlan20
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:14:58 UTC; 8min ago
      Until: Tue 2025-11-25 10:14:58 UTC; 8min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-devices-virtual-net-vlan21.device - /sys/devices/virtual/net/vlan21
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:14:58 UTC; 8min ago
      Until: Tue 2025-11-25 10:14:58 UTC; 8min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-devices-virtual-net-vlan22.device - /sys/devices/virtual/net/vlan22
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:14:58 UTC; 8min ago
      Until: Tue 2025-11-25 10:14:58 UTC; 8min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-devices-virtual-net-vlan23.device - /sys/devices/virtual/net/vlan23
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:14:58 UTC; 8min ago
      Until: Tue 2025-11-25 10:14:58 UTC; 8min ago
     Device: /sys/devices/virtual/net/vlan23

● sys-module-configfs.device - /sys/module/configfs
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/module/configfs

● sys-module-fuse.device - /sys/module/fuse
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/module/fuse

● sys-subsystem-net-devices-br\x2dex.device - /sys/subsystem/net/devices/br-ex
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:14:58 UTC; 8min ago
Unit boot.mount could not be found.
      Until: Tue 2025-11-25 10:14:58 UTC; 8min ago
     Device: /sys/devices/virtual/net/br-ex

● sys-subsystem-net-devices-eth0.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
     Device: /sys/devices/pci0000:00/0000:00:03.0/virtio1/net/eth0

● sys-subsystem-net-devices-eth1.device - Virtio network device
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 09:46:14 UTC; 37min ago
      Until: Tue 2025-11-25 09:46:14 UTC; 37min ago
     Device: /sys/devices/pci0000:00/0000:00:07.0/virtio5/net/eth1

● sys-subsystem-net-devices-ovs\x2dsystem.device - /sys/subsystem/net/devices/ovs-system
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:14:58 UTC; 8min ago
      Until: Tue 2025-11-25 10:14:58 UTC; 8min ago
     Device: /sys/devices/virtual/net/ovs-system

● sys-subsystem-net-devices-vlan20.device - /sys/subsystem/net/devices/vlan20
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:14:58 UTC; 8min ago
      Until: Tue 2025-11-25 10:14:58 UTC; 8min ago
     Device: /sys/devices/virtual/net/vlan20

● sys-subsystem-net-devices-vlan21.device - /sys/subsystem/net/devices/vlan21
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:14:58 UTC; 8min ago
      Until: Tue 2025-11-25 10:14:58 UTC; 8min ago
     Device: /sys/devices/virtual/net/vlan21

● sys-subsystem-net-devices-vlan22.device - /sys/subsystem/net/devices/vlan22
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:14:58 UTC; 8min ago
      Until: Tue 2025-11-25 10:14:58 UTC; 8min ago
     Device: /sys/devices/virtual/net/vlan22

● sys-subsystem-net-devices-vlan23.device - /sys/subsystem/net/devices/vlan23
     Loaded: loaded
     Active: active (plugged) since Tue 2025-11-25 10:14:58 UTC; 8min ago
      Until: Tue 2025-11-25 10:14:58 UTC; 8min ago
     Device: /sys/devices/virtual/net/vlan23

● -.mount - Root Mount
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted)
      Where: /
       What: /dev/vda1
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages.mount - Huge Pages File System
     Loaded: loaded (/usr/lib/systemd/system/dev-hugepages.mount; static)
     Active: active (mounted) since Tue 2025-11-25 09:43:55 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:55 UTC; 39min ago
      Where: /dev/hugepages
       What: hugetlbfs
       Docs: https://docs.kernel.org/admin-guide/mm/hugetlbpage.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48640)
     Memory: 16.0K (peak: 536.0K)
        CPU: 8ms
     CGroup: /dev-hugepages.mount

Nov 25 09:43:55 localhost systemd[1]: Mounted Huge Pages File System.

● dev-hugepages1G.mount - /dev/hugepages1G
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Tue 2025-11-25 10:16:55 UTC; 6min ago
      Until: Tue 2025-11-25 10:16:55 UTC; 6min ago
      Where: /dev/hugepages1G
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-hugepages2M.mount - /dev/hugepages2M
     Loaded: loaded (/etc/fstab; generated)
     Active: active (mounted) since Tue 2025-11-25 10:16:56 UTC; 6min ago
      Until: Tue 2025-11-25 10:16:56 UTC; 6min ago
      Where: /dev/hugepages2M
       What: none
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● dev-mqueue.mount - POSIX Message Queue File System
     Loaded: loaded (/usr/lib/systemd/system/dev-mqueue.mount; static)
     Active: active (mounted) since Tue 2025-11-25 09:43:55 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:55 UTC; 39min ago
      Where: /dev/mqueue
       What: mqueue
       Docs: man:mq_overview(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48640)
     Memory: 40.0K (peak: 548.0K)
Unit home.mount could not be found.
        CPU: 7ms
     CGroup: /dev-mqueue.mount

Nov 25 09:43:55 localhost systemd[1]: Mounted POSIX Message Queue File System.

○ proc-fs-nfsd.mount - NFSD configuration filesystem
     Loaded: loaded (/usr/lib/systemd/system/proc-fs-nfsd.mount; static)
     Active: inactive (dead)
      Where: /proc/fs/nfsd
       What: nfsd

● proc-sys-fs-binfmt_misc.mount - Arbitrary Executable File Formats File System
     Loaded: loaded (/usr/lib/systemd/system/proc-sys-fs-binfmt_misc.mount; disabled; preset: disabled)
     Active: active (mounted) since Tue 2025-11-25 10:19:41 UTC; 3min 50s ago
      Until: Tue 2025-11-25 10:19:41 UTC; 3min 50s ago
TriggeredBy: ● proc-sys-fs-binfmt_misc.automount
      Where: /proc/sys/fs/binfmt_misc
       What: binfmt_misc
       Docs: https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48640)
     Memory: 8.0K (peak: 548.0K)
        CPU: 6ms
     CGroup: /proc-sys-fs-binfmt_misc.mount

Nov 25 10:19:41 compute-0 systemd[1]: Mounting Arbitrary Executable File Formats File System...
Nov 25 10:19:41 compute-0 systemd[1]: Mounted Arbitrary Executable File Formats File System.

● run-credentials-systemd\x2dsysctl.service.mount - /run/credentials/systemd-sysctl.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-11-25 10:13:49 UTC; 9min ago
      Until: Tue 2025-11-25 10:13:49 UTC; 9min ago
      Where: /run/credentials/systemd-sysctl.service
       What: none

● run-credentials-systemd\x2dsysusers.service.mount - /run/credentials/systemd-sysusers.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-11-25 09:43:55 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:55 UTC; 39min ago
      Where: /run/credentials/systemd-sysusers.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup.service.mount - /run/credentials/systemd-tmpfiles-setup.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
      Where: /run/credentials/systemd-tmpfiles-setup.service
       What: none

● run-credentials-systemd\x2dtmpfiles\x2dsetup\x2ddev.service.mount - /run/credentials/systemd-tmpfiles-setup-dev.service
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-11-25 09:43:55 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:55 UTC; 39min ago
      Where: /run/credentials/systemd-tmpfiles-setup-dev.service
       What: none

● run-netns.mount - /run/netns
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-11-25 10:16:27 UTC; 7min ago
      Until: Tue 2025-11-25 10:16:27 UTC; 7min ago
      Where: /run/netns
       What: tmpfs

● run-user-1000.mount - /run/user/1000
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-11-25 09:44:39 UTC; 38min ago
      Until: Tue 2025-11-25 09:44:39 UTC; 38min ago
      Where: /run/user/1000
       What: tmpfs

● run-user-42477.mount - /run/user/42477
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-11-25 10:19:34 UTC; 3min 57s ago
      Until: Tue 2025-11-25 10:19:34 UTC; 3min 57s ago
      Where: /run/user/42477
       What: tmpfs

● sys-fs-fuse-connections.mount - FUSE Control File System
     Loaded: loaded (/usr/lib/systemd/system/sys-fs-fuse-connections.mount; static)
     Active: active (mounted) since Tue 2025-11-25 09:43:55 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:55 UTC; 39min ago
      Where: /sys/fs/fuse/connections
       What: fusectl
       Docs: https://docs.kernel.org/filesystems/fuse.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48640)
     Memory: 4.0K (peak: 404.0K)
        CPU: 5ms
     CGroup: /sys-fs-fuse-connections.mount

Unit sysroot.mount could not be found.
Nov 25 09:43:55 localhost systemd[1]: Mounting FUSE Control File System...
Nov 25 09:43:55 localhost systemd[1]: Mounted FUSE Control File System.

● sys-kernel-config.mount - Kernel Configuration File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Tue 2025-11-25 09:43:54 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:54 UTC; 39min ago
      Where: /sys/kernel/config
       What: configfs
       Docs: https://docs.kernel.org/filesystems/configfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● sys-kernel-debug-tracing.mount - /sys/kernel/debug/tracing
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-11-25 10:23:11 UTC; 20s ago
      Until: Tue 2025-11-25 10:23:11 UTC; 20s ago
      Where: /sys/kernel/debug/tracing
       What: tracefs

● sys-kernel-debug.mount - Kernel Debug File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-debug.mount; static)
     Active: active (mounted) since Tue 2025-11-25 09:43:55 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:55 UTC; 39min ago
      Where: /sys/kernel/debug
       What: debugfs
       Docs: https://docs.kernel.org/filesystems/debugfs.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48640)
     Memory: 4.0K (peak: 536.0K)
        CPU: 7ms
     CGroup: /sys-kernel-debug.mount

Nov 25 09:43:55 localhost systemd[1]: Mounted Kernel Debug File System.

● sys-kernel-tracing.mount - Kernel Trace File System
     Loaded: loaded (/usr/lib/systemd/system/sys-kernel-tracing.mount; static)
     Active: active (mounted) since Tue 2025-11-25 09:43:55 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:55 UTC; 39min ago
      Where: /sys/kernel/tracing
       What: tracefs
       Docs: https://docs.kernel.org/trace/ftrace.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48640)
     Memory: 4.0K (peak: 540.0K)
        CPU: 4ms
     CGroup: /sys-kernel-tracing.mount

Nov 25 09:43:55 localhost systemd[1]: Mounted Kernel Trace File System.

○ tmp.mount - Temporary Directory /tmp
     Loaded: loaded (/usr/lib/systemd/system/tmp.mount; disabled; preset: disabled)
     Active: inactive (dead)
      Where: /tmp
       What: tmpfs
       Docs: https://systemd.io/TEMPORARY_DIRECTORIES
             man:file-hierarchy(7)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

● var-lib-containers-storage-overlay-67c782d2c906197a7dfd544bd5e51d572dec53f1f9c4065218093dc19e3a9084-merged.mount - /var/lib/containers/storage/overlay/67c782d2c906197a7dfd544bd5e51d572dec53f1f9c4065218093dc19e3a9084/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-11-25 10:21:18 UTC; 2min 13s ago
      Until: Tue 2025-11-25 10:21:18 UTC; 2min 13s ago
      Where: /var/lib/containers/storage/overlay/67c782d2c906197a7dfd544bd5e51d572dec53f1f9c4065218093dc19e3a9084/merged
       What: overlay

● var-lib-containers-storage-overlay-72fa4d15927080a6bfe978ce93c4babbf2e3e1c37b80c9b7ccd827ac007bc61e-merged.mount - /var/lib/containers/storage/overlay/72fa4d15927080a6bfe978ce93c4babbf2e3e1c37b80c9b7ccd827ac007bc61e/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-11-25 10:18:43 UTC; 4min 48s ago
      Until: Tue 2025-11-25 10:18:43 UTC; 4min 48s ago
      Where: /var/lib/containers/storage/overlay/72fa4d15927080a6bfe978ce93c4babbf2e3e1c37b80c9b7ccd827ac007bc61e/merged
       What: overlay

● var-lib-containers-storage-overlay-76af8162433e67772d04c0354db99e372a55ce78d641e7fbcc953696a2af5167-merged.mount - /var/lib/containers/storage/overlay/76af8162433e67772d04c0354db99e372a55ce78d641e7fbcc953696a2af5167/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-11-25 10:20:19 UTC; 3min 12s ago
      Until: Tue 2025-11-25 10:20:19 UTC; 3min 12s ago
      Where: /var/lib/containers/storage/overlay/76af8162433e67772d04c0354db99e372a55ce78d641e7fbcc953696a2af5167/merged
       What: overlay

● var-lib-containers-storage-overlay-819907b5076fbcecf9c10f88b509373855bc61745f4e000361c237b7aea697e1-merged.mount - /var/lib/containers/storage/overlay/819907b5076fbcecf9c10f88b509373855bc61745f4e000361c237b7aea697e1/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-11-25 10:19:52 UTC; 3min 39s ago
      Until: Tue 2025-11-25 10:19:52 UTC; 3min 39s ago
      Where: /var/lib/containers/storage/overlay/819907b5076fbcecf9c10f88b509373855bc61745f4e000361c237b7aea697e1/merged
       What: overlay

● var-lib-containers-storage-overlay-92adbd4d5894e07d30010a8caee28f8ac879df050af6996b1daece83c0878d42-merged.mount - /var/lib/containers/storage/overlay/92adbd4d5894e07d30010a8caee28f8ac879df050af6996b1daece83c0878d42/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-11-25 10:21:16 UTC; 2min 15s ago
      Until: Tue 2025-11-25 10:21:16 UTC; 2min 15s ago
      Where: /var/lib/containers/storage/overlay/92adbd4d5894e07d30010a8caee28f8ac879df050af6996b1daece83c0878d42/merged
       What: overlay

● var-lib-containers-storage-overlay-b22b2c6c449548faa74594989808ce0615efc615c198f11cc5dd5158f09f0fae-merged.mount - /var/lib/containers/storage/overlay/b22b2c6c449548faa74594989808ce0615efc615c198f11cc5dd5158f09f0fae/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-11-25 10:20:29 UTC; 3min 2s ago
      Until: Tue 2025-11-25 10:20:29 UTC; 3min 2s ago
      Where: /var/lib/containers/storage/overlay/b22b2c6c449548faa74594989808ce0615efc615c198f11cc5dd5158f09f0fae/merged
       What: overlay

● var-lib-containers-storage-overlay-d6fea0d0986557a5105f09ca898d2886b3fc7fe482db18909177821c96b2ac8b-merged.mount - /var/lib/containers/storage/overlay/d6fea0d0986557a5105f09ca898d2886b3fc7fe482db18909177821c96b2ac8b/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-11-25 10:18:41 UTC; 4min 51s ago
      Until: Tue 2025-11-25 10:18:41 UTC; 4min 51s ago
      Where: /var/lib/containers/storage/overlay/d6fea0d0986557a5105f09ca898d2886b3fc7fe482db18909177821c96b2ac8b/merged
       What: overlay

● var-lib-containers-storage-overlay-e8bae958d562a85119b1955dd653806d01f7d05bc3860885bd855f978cd76108-merged.mount - /var/lib/containers/storage/overlay/e8bae958d562a85119b1955dd653806d01f7d05bc3860885bd855f978cd76108/merged
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-11-25 10:20:24 UTC; 3min 7s ago
      Until: Tue 2025-11-25 10:20:24 UTC; 3min 7s ago
      Where: /var/lib/containers/storage/overlay/e8bae958d562a85119b1955dd653806d01f7d05bc3860885bd855f978cd76108/merged
       What: overlay

● var-lib-containers-storage-overlay.mount - /var/lib/containers/storage/overlay
     Loaded: loaded (/proc/self/mountinfo)
     Active: active (mounted) since Tue 2025-11-25 10:18:41 UTC; 4min 51s ago
      Until: Tue 2025-11-25 10:18:41 UTC; 4min 51s ago
      Where: /var/lib/containers/storage/overlay
       What: /dev/vda1

○ var-lib-machines.mount - Virtual Machine and Container Storage (Compatibility)
     Loaded: loaded (/usr/lib/systemd/system/var-lib-machines.mount; static)
     Active: inactive (dead)
      Where: /var/lib/machines
       What: /var/lib/machines.raw

● var-lib-nfs-rpc_pipefs.mount - RPC Pipe File System
     Loaded: loaded (/proc/self/mountinfo; static)
     Active: active (mounted) since Tue 2025-11-25 09:43:54 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:54 UTC; 39min ago
      Where: /var/lib/nfs/rpc_pipefs
       What: rpc_pipefs

● systemd-ask-password-console.path - Dispatch Password Requests to Console Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.path; static)
     Active: active (waiting) since Tue 2025-11-25 09:43:54 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:54 UTC; 39min ago
   Triggers: ● systemd-ask-password-console.service
       Docs: man:systemd-ask-password-console.path(8)

● systemd-ask-password-wall.path - Forward Password Requests to Wall Directory Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.path; static)
     Active: active (waiting) since Tue 2025-11-25 09:43:54 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:54 UTC; 39min ago
   Triggers: ● systemd-ask-password-wall.service
       Docs: man:systemd-ask-password-wall.path(8)

● init.scope - System and Service Manager
     Loaded: loaded
  Transient: yes
     Active: active (running) since Tue 2025-11-25 09:43:50 UTC; 39min ago
       Docs: man:systemd(1)
         IO: 472.0K read, 0B written
      Tasks: 1 (limit: 48640)
     Memory: 42.2M (peak: 60.6M)
        CPU: 28.751s
     CGroup: /init.scope
             └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31

Nov 25 10:22:45 compute-0 systemd[1]: var-lib-containers-storage-overlay-89fe432e4be816f92b04c86830e75b38fc47de13d02f7add26c604a0e90f4941-merged.mount: Deactivated successfully.
Nov 25 10:22:45 compute-0 systemd[1]: libpod-conmon-56584dd1178cbf0ac05ccd30ccb9408f8c01a098b370e0743280ab45dfd65d4b.scope: Deactivated successfully.
Nov 25 10:22:46 compute-0 systemd[1]: Started libpod-conmon-f87ebeb79efd0301f20165a35827c52b1fe3d2422e6d2d79040e29a142d471cc.scope.
Nov 25 10:22:46 compute-0 systemd[1]: Started libcrun container.
Nov 25 10:22:47 compute-0 systemd[1]: libpod-f87ebeb79efd0301f20165a35827c52b1fe3d2422e6d2d79040e29a142d471cc.scope: Deactivated successfully.
Nov 25 10:22:47 compute-0 systemd[1]: var-lib-containers-storage-overlay-a37b201cda1e39348ff64384d03f527a19d3ae981b1104bb0b7fab9c142c620d-merged.mount: Deactivated successfully.
Nov 25 10:22:47 compute-0 systemd[1]: libpod-conmon-f87ebeb79efd0301f20165a35827c52b1fe3d2422e6d2d79040e29a142d471cc.scope: Deactivated successfully.
Nov 25 10:23:08 compute-0 systemd[1]: Started Session 33 of User zuul.
Nov 25 10:23:19 compute-0 systemd[1]: Starting Hostname Service...
Nov 25 10:23:19 compute-0 systemd[1]: Started Hostname Service.

● session-1.scope - Session 1 of User zuul
     Loaded: loaded (/run/systemd/transient/session-1.scope; transient)
  Transient: yes
     Active: active (abandoned) since Tue 2025-11-25 09:44:39 UTC; 38min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 16.5M (peak: 37.9M)
        CPU: 1min 15.562s
     CGroup: /user.slice/user-1000.slice/session-1.scope
             └─4518 /usr/bin/python3

Nov 25 09:47:24 np0005534776.novalocal sudo[7377]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Nov 25 09:47:24 np0005534776.novalocal python3[7379]: ansible-ansible.legacy.stat Invoked with path=/etc/ci/env/networking-info.yml follow=False get_checksum=True checksum_algorithm=sha1 get_md5=False get_mime=True get_attributes=True
Nov 25 09:47:24 np0005534776.novalocal sudo[7377]: pam_unix(sudo:session): session closed for user root
Nov 25 09:47:24 np0005534776.novalocal sudo[7450]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/sh -c 'echo BECOME-SUCCESS-bqvwnahommndmtwpdynarbdwbwdlwfsw ; OS_CLOUD=vexxhost /usr/bin/python3'
Nov 25 09:47:24 np0005534776.novalocal sudo[7450]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Nov 25 09:47:24 np0005534776.novalocal python3[7452]: ansible-ansible.legacy.copy Invoked with dest=/etc/ci/env/networking-info.yml owner=root group=root mode=0644 src=/home/zuul/.ansible/tmp/ansible-tmp-1764064044.162185-267-96322701056476/source _original_basename=tmp13_fje0a follow=False checksum=da7c40c6905431b9cf795ed51e840f01f3f7d951 backup=False force=True unsafe_writes=False content=NOT_LOGGING_PARAMETER validate=None directory_mode=None remote_src=None local_follow=None seuser=None serole=None selevel=None setype=None attributes=None
Nov 25 09:47:24 np0005534776.novalocal sudo[7450]: pam_unix(sudo:session): session closed for user root
Nov 25 09:48:24 np0005534776.novalocal sshd-session[4317]: Received disconnect from 38.102.83.114 port 48690:11: disconnected by user
Nov 25 09:48:24 np0005534776.novalocal sshd-session[4317]: Disconnected from user zuul 38.102.83.114 port 48690
Nov 25 09:48:24 np0005534776.novalocal sshd-session[4303]: pam_unix(sshd:session): session closed for user zuul

● session-17.scope - Session 17 of User zuul
     Loaded: loaded (/run/systemd/transient/session-17.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-11-25 10:17:39 UTC; 5min ago
         IO: 22.8M read, 1.6G written
      Tasks: 2
     Memory: 1.2G (peak: 1.7G)
        CPU: 1min 22.010s
     CGroup: /user.slice/user-1000.slice/session-17.scope
             ├─71412 "sshd-session: zuul [priv]"
             └─71415 "sshd-session: zuul@notty"

Nov 25 10:21:33 compute-0 sudo[103609]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Nov 25 10:21:33 compute-0 python3[103614]: ansible-ansible.legacy.command Invoked with _raw_params=podman run --rm --net=host --ipc=host   --volume /etc/ceph:/etc/ceph:z --volume /home/ceph-admin/assimilate_ceph.conf:/home/assimilate_ceph.conf:z    --entrypoint ceph quay.io/ceph/ceph:v18 --fsid 86651951-d7ad-54ed-8d1a-1eae0c6b599c -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring   versions -f json _uses_shell=False stdin_add_newline=True strip_empty_ends=True argv=None chdir=None executable=None creates=None removes=None stdin=None
Nov 25 10:21:33 compute-0 podman[103643]: 2025-11-25 10:21:33.407109233 +0000 UTC m=+0.061580068 container create e598db5de9f634ee14f58ccec8aec9e3c39459ce19a85ffa417416c9bddb013e (image=quay.io/ceph/ceph:v18, name=naughty_feistel, CEPH_REF=reef, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, OSD_FLAVOR=default, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Nov 25 10:21:33 compute-0 podman[103643]: 2025-11-25 10:21:33.370574541 +0000 UTC m=+0.025045416 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph:v18
Nov 25 10:21:33 compute-0 podman[103643]: 2025-11-25 10:21:33.485730475 +0000 UTC m=+0.140201290 container init e598db5de9f634ee14f58ccec8aec9e3c39459ce19a85ffa417416c9bddb013e (image=quay.io/ceph/ceph:v18, name=naughty_feistel, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, org.label-schema.license=GPLv2, OSD_FLAVOR=default)
Nov 25 10:21:33 compute-0 podman[103643]: 2025-11-25 10:21:33.492222997 +0000 UTC m=+0.146693792 container start e598db5de9f634ee14f58ccec8aec9e3c39459ce19a85ffa417416c9bddb013e (image=quay.io/ceph/ceph:v18, name=naughty_feistel, io.buildah.version=1.39.3, ceph=True, CEPH_REF=reef, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.license=GPLv2)
Nov 25 10:21:33 compute-0 podman[103643]: 2025-11-25 10:21:33.495572851 +0000 UTC m=+0.150043646 container attach e598db5de9f634ee14f58ccec8aec9e3c39459ce19a85ffa417416c9bddb013e (image=quay.io/ceph/ceph:v18, name=naughty_feistel, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Nov 25 10:21:34 compute-0 podman[103643]: 2025-11-25 10:21:34.096737857 +0000 UTC m=+0.751208652 container died e598db5de9f634ee14f58ccec8aec9e3c39459ce19a85ffa417416c9bddb013e (image=quay.io/ceph/ceph:v18, name=naughty_feistel, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Nov 25 10:21:34 compute-0 podman[103643]: 2025-11-25 10:21:34.133680889 +0000 UTC m=+0.788151684 container remove e598db5de9f634ee14f58ccec8aec9e3c39459ce19a85ffa417416c9bddb013e (image=quay.io/ceph/ceph:v18, name=naughty_feistel, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, ceph=True, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Nov 25 10:21:34 compute-0 sudo[103609]: pam_unix(sudo:session): session closed for user root

● session-20.scope - Session 20 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-20.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-11-25 10:19:34 UTC; 3min 57s ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.0M (peak: 1.3M)
        CPU: 11ms
     CGroup: /user.slice/user-42477.slice/session-20.scope
             ├─76729 "sshd-session: ceph-admin [priv]"
             └─76752 "sshd-session: ceph-admin"

Nov 25 10:19:34 compute-0 systemd[1]: Started Session 20 of User ceph-admin.

● session-22.scope - Session 22 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-22.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-11-25 10:19:34 UTC; 3min 57s ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.4M)
        CPU: 114ms
     CGroup: /user.slice/user-42477.slice/session-22.scope
             ├─76747 "sshd-session: ceph-admin [priv]"
             └─76753 "sshd-session: ceph-admin@notty"

Nov 25 10:19:34 compute-0 systemd[1]: Started Session 22 of User ceph-admin.
Nov 25 10:19:34 compute-0 sudo[76754]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Nov 25 10:19:34 compute-0 sudo[76754]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 25 10:19:34 compute-0 sudo[76754]: pam_unix(sudo:session): session closed for user root
Nov 25 10:19:34 compute-0 sudo[76779]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/which python3
Nov 25 10:19:34 compute-0 sudo[76779]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 25 10:19:34 compute-0 sudo[76779]: pam_unix(sudo:session): session closed for user root

● session-23.scope - Session 23 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-23.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-11-25 10:19:34 UTC; 3min 57s ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 5.9M)
        CPU: 135ms
     CGroup: /user.slice/user-42477.slice/session-23.scope
             ├─76804 "sshd-session: ceph-admin [priv]"
             └─76807 "sshd-session: ceph-admin@notty"

Nov 25 10:19:34 compute-0 systemd[1]: Started Session 23 of User ceph-admin.
Nov 25 10:19:34 compute-0 sudo[76808]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Nov 25 10:19:34 compute-0 sudo[76808]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 25 10:19:34 compute-0 sudo[76808]: pam_unix(sudo:session): session closed for user root
Nov 25 10:19:34 compute-0 sudo[76833]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/python3 /var/lib/ceph/86651951-d7ad-54ed-8d1a-1eae0c6b599c/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d --timeout 895 check-host --expect-hostname compute-0
Nov 25 10:19:34 compute-0 sudo[76833]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 25 10:19:34 compute-0 sudo[76833]: pam_unix(sudo:session): session closed for user root

● session-24.scope - Session 24 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-24.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-11-25 10:19:35 UTC; 3min 57s ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.1M)
        CPU: 122ms
     CGroup: /user.slice/user-42477.slice/session-24.scope
             ├─76858 "sshd-session: ceph-admin [priv]"
             └─76861 "sshd-session: ceph-admin@notty"

Nov 25 10:19:35 compute-0 systemd[1]: Started Session 24 of User ceph-admin.
Nov 25 10:19:35 compute-0 sudo[76862]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Nov 25 10:19:35 compute-0 sudo[76862]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 25 10:19:35 compute-0 sudo[76862]: pam_unix(sudo:session): session closed for user root
Nov 25 10:19:35 compute-0 sudo[76887]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /var/lib/ceph/86651951-d7ad-54ed-8d1a-1eae0c6b599c/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d
Nov 25 10:19:35 compute-0 sudo[76887]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 25 10:19:35 compute-0 sudo[76887]: pam_unix(sudo:session): session closed for user root

● session-25.scope - Session 25 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-25.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-11-25 10:19:35 UTC; 3min 56s ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.2M)
        CPU: 132ms
     CGroup: /user.slice/user-42477.slice/session-25.scope
             ├─76912 "sshd-session: ceph-admin [priv]"
             └─76915 "sshd-session: ceph-admin@notty"

Nov 25 10:19:35 compute-0 systemd[1]: Started Session 25 of User ceph-admin.
Nov 25 10:19:35 compute-0 sudo[76916]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Nov 25 10:19:35 compute-0 sudo[76916]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 25 10:19:35 compute-0 sudo[76916]: pam_unix(sudo:session): session closed for user root
Nov 25 10:19:35 compute-0 sudo[76941]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /var/lib/ceph/86651951-d7ad-54ed-8d1a-1eae0c6b599c
Nov 25 10:19:35 compute-0 sudo[76941]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 25 10:19:35 compute-0 sudo[76941]: pam_unix(sudo:session): session closed for user root

● session-26.scope - Session 26 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-26.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-11-25 10:19:35 UTC; 3min 56s ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.6M)
        CPU: 128ms
     CGroup: /user.slice/user-42477.slice/session-26.scope
             ├─76966 "sshd-session: ceph-admin [priv]"
             └─76969 "sshd-session: ceph-admin@notty"

Nov 25 10:19:35 compute-0 systemd[1]: Started Session 26 of User ceph-admin.
Nov 25 10:19:35 compute-0 sudo[76970]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Nov 25 10:19:35 compute-0 sudo[76970]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 25 10:19:35 compute-0 sudo[76970]: pam_unix(sudo:session): session closed for user root
Nov 25 10:19:35 compute-0 sudo[76995]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mkdir -p /tmp/cephadm-86651951-d7ad-54ed-8d1a-1eae0c6b599c/var/lib/ceph/86651951-d7ad-54ed-8d1a-1eae0c6b599c
Nov 25 10:19:35 compute-0 sudo[76995]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 25 10:19:35 compute-0 sudo[76995]: pam_unix(sudo:session): session closed for user root

● session-27.scope - Session 27 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-27.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-11-25 10:19:36 UTC; 3min 56s ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.4M)
        CPU: 159ms
     CGroup: /user.slice/user-42477.slice/session-27.scope
             ├─77020 "sshd-session: ceph-admin [priv]"
             └─77023 "sshd-session: ceph-admin@notty"

Nov 25 10:19:36 compute-0 systemd[1]: Started Session 27 of User ceph-admin.
Nov 25 10:19:36 compute-0 sudo[77024]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Nov 25 10:19:36 compute-0 sudo[77024]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 25 10:19:36 compute-0 sudo[77024]: pam_unix(sudo:session): session closed for user root
Nov 25 10:19:36 compute-0 sudo[77049]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/touch /tmp/cephadm-86651951-d7ad-54ed-8d1a-1eae0c6b599c/var/lib/ceph/86651951-d7ad-54ed-8d1a-1eae0c6b599c/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new
Nov 25 10:19:36 compute-0 sudo[77049]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 25 10:19:36 compute-0 sudo[77049]: pam_unix(sudo:session): session closed for user root

● session-28.scope - Session 28 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-28.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-11-25 10:19:36 UTC; 3min 55s ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.1M)
        CPU: 118ms
     CGroup: /user.slice/user-42477.slice/session-28.scope
             ├─77074 "sshd-session: ceph-admin [priv]"
             └─77077 "sshd-session: ceph-admin@notty"

Nov 25 10:19:36 compute-0 systemd[1]: Started Session 28 of User ceph-admin.
Nov 25 10:19:36 compute-0 sudo[77078]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Nov 25 10:19:36 compute-0 sudo[77078]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 25 10:19:36 compute-0 sudo[77078]: pam_unix(sudo:session): session closed for user root
Nov 25 10:19:36 compute-0 sudo[77103]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chown -R ceph-admin /tmp/cephadm-86651951-d7ad-54ed-8d1a-1eae0c6b599c
Nov 25 10:19:36 compute-0 sudo[77103]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 25 10:19:36 compute-0 sudo[77103]: pam_unix(sudo:session): session closed for user root

● session-29.scope - Session 29 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-29.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-11-25 10:19:36 UTC; 3min 55s ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.4M)
        CPU: 115ms
     CGroup: /user.slice/user-42477.slice/session-29.scope
             ├─77128 "sshd-session: ceph-admin [priv]"
             └─77131 "sshd-session: ceph-admin@notty"

Nov 25 10:19:36 compute-0 systemd[1]: Started Session 29 of User ceph-admin.
Nov 25 10:19:37 compute-0 sudo[77132]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Nov 25 10:19:37 compute-0 sudo[77132]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 25 10:19:37 compute-0 sudo[77132]: pam_unix(sudo:session): session closed for user root
Nov 25 10:19:37 compute-0 sudo[77157]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/chmod 644 /tmp/cephadm-86651951-d7ad-54ed-8d1a-1eae0c6b599c/var/lib/ceph/86651951-d7ad-54ed-8d1a-1eae0c6b599c/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new
Nov 25 10:19:37 compute-0 sudo[77157]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 25 10:19:37 compute-0 sudo[77157]: pam_unix(sudo:session): session closed for user root

● session-30.scope - Session 30 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-30.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-11-25 10:19:37 UTC; 3min 54s ago
         IO: 0B read, 216.0K written
      Tasks: 2
     Memory: 1.4M (peak: 3.5M)
        CPU: 57ms
     CGroup: /user.slice/user-42477.slice/session-30.scope
             ├─77182 "sshd-session: ceph-admin [priv]"
             └─77185 "sshd-session: ceph-admin@notty"

Nov 25 10:19:37 compute-0 systemd[1]: Started Session 30 of User ceph-admin.

● session-31.scope - Session 31 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-31.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-11-25 10:19:37 UTC; 3min 54s ago
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 1.1M (peak: 4.4M)
        CPU: 119ms
     CGroup: /user.slice/user-42477.slice/session-31.scope
             ├─77209 "sshd-session: ceph-admin [priv]"
             └─77212 "sshd-session: ceph-admin@notty"

Nov 25 10:19:37 compute-0 systemd[1]: Started Session 31 of User ceph-admin.
Nov 25 10:19:37 compute-0 sudo[77213]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Nov 25 10:19:37 compute-0 sudo[77213]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 25 10:19:37 compute-0 sudo[77213]: pam_unix(sudo:session): session closed for user root
Nov 25 10:19:38 compute-0 sudo[77238]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/mv /tmp/cephadm-86651951-d7ad-54ed-8d1a-1eae0c6b599c/var/lib/ceph/86651951-d7ad-54ed-8d1a-1eae0c6b599c/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d.new /var/lib/ceph/86651951-d7ad-54ed-8d1a-1eae0c6b599c/cephadm.31206ab20142c8051b6384b731ef7ef7af2407447fac35b7291e90720452ed8d
Nov 25 10:19:38 compute-0 sudo[77238]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 25 10:19:38 compute-0 sudo[77238]: pam_unix(sudo:session): session closed for user root

● session-32.scope - Session 32 of User ceph-admin
     Loaded: loaded (/run/systemd/transient/session-32.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-11-25 10:19:38 UTC; 3min 54s ago
         IO: 48.0K read, 34.5M written
      Tasks: 2
     Memory: 3.6M (peak: 64.8M)
        CPU: 56.590s
     CGroup: /user.slice/user-42477.slice/session-32.scope
             ├─77263 "sshd-session: ceph-admin [priv]"
             └─77266 "sshd-session: ceph-admin@notty"

Nov 25 10:22:46 compute-0 podman[105409]: 2025-11-25 10:22:45.987120009 +0000 UTC m=+0.020799455 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Nov 25 10:22:46 compute-0 podman[105409]: 2025-11-25 10:22:46.086649459 +0000 UTC m=+0.120328915 container start f87ebeb79efd0301f20165a35827c52b1fe3d2422e6d2d79040e29a142d471cc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=xenodochial_blackburn, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.schema-version=1.0)
Nov 25 10:22:46 compute-0 podman[105409]: 2025-11-25 10:22:46.090957435 +0000 UTC m=+0.124636861 container attach f87ebeb79efd0301f20165a35827c52b1fe3d2422e6d2d79040e29a142d471cc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=xenodochial_blackburn, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2)
Nov 25 10:22:47 compute-0 sudo[105305]: pam_unix(sudo:session): session closed for user root
Nov 25 10:22:47 compute-0 sudo[105471]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Nov 25 10:22:47 compute-0 sudo[105471]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 25 10:22:47 compute-0 sudo[105471]: pam_unix(sudo:session): session closed for user root
Nov 25 10:22:47 compute-0 sudo[105496]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Nov 25 10:22:47 compute-0 sudo[105496]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 25 10:22:47 compute-0 sudo[105496]: pam_unix(sudo:session): session closed for user root

● session-33.scope - Session 33 of User zuul
     Loaded: loaded (/run/systemd/transient/session-33.scope; transient)
  Transient: yes
     Active: active (running) since Tue 2025-11-25 10:23:08 UTC; 24s ago
         IO: 97.6M read, 11.6M written
      Tasks: 15
     Memory: 381.2M (peak: 474.8M)
        CPU: 56.981s
     CGroup: /user.slice/user-1000.slice/session-33.scope
             ├─105525 "sshd-session: zuul [priv]"
             ├─105528 "sshd-session: zuul@notty"
             ├─105529 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             ├─105553 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             ├─108355 timeout 15s turbostat --deUnit apt-daily.service could not be found.
Unit auto-cpufreq.service could not be found.
Unit autofs.service could not be found.
bug sleep 10
             ├─108762 timeout 300s systemctl status --all
             ├─108763 systemctl status --all
             ├─108764 timeout 300s semanage interface -l
             └─108765 /usr/bin/python3 -EsI /usr/sbin/semanage interface -l

Nov 25 10:23:08 compute-0 systemd[1]: Started Session 33 of User zuul.
Nov 25 10:23:08 compute-0 sudo[105529]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Nov 25 10:23:08 compute-0 sudo[105529]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Nov 25 10:23:12 compute-0 ovs-vsctl[105728]: ovs|00001|db_ctl_base|ERR|no key "dpdk-init" in Open_vSwitch record "." column other_config
Nov 25 10:23:23 compute-0 ovs-appctl[107646]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Nov 25 10:23:23 compute-0 ovs-appctl[107653]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Nov 25 10:23:23 compute-0 ovs-appctl[107658]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

● auditd.service - Security Auditing Service
     Loaded: loaded (/usr/lib/systemd/system/auditd.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:auditd(8)
             https://github.com/linux-audit/audit-documentation
   Main PID: 700 (auditd)
         IO: 0B read, 8.6M written
      Tasks: 4 (limit: 48640)
     Memory: 8.9M (peak: 9.4M)
        CPU: 1.884s
     CGroup: /system.slice/auditd.service
             ├─700 /sbin/auditd
             └─702 /usr/sbin/sedispatch

Nov 25 09:43:55 localhost augenrules[720]: enabled 1
Nov 25 09:43:55 localhost augenrules[720]: failure 1
Nov 25 09:43:55 localhost augenrules[720]: pid 700
Nov 25 09:43:55 localhost augenrules[720]: rate_limit 0
Nov 25 09:43:55 localhost augenrules[720]: backlog_limit 8192
Nov 25 09:43:55 localhost augenrules[720]: lost 0
Nov 25 09:43:55 localhost augenrules[720]: backlog 0
Nov 25 09:43:55 localhost augenrules[720]: backlog_wait_time 60000
Nov 25 09:43:55 localhost augenrules[720]: backlog_wait_time_actual 0
Nov 25 09:43:55 localhost systemd[1]: Started Security Auditing Service.

○ auth-rpcgss-module.service - Kernel Module supporting RPCSEC_GSS
     Loaded: loaded (/usr/lib/systemd/system/auth-rpcgss-module.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-11-25 09:43:55 UTC; 39min ago

● ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@crash.compute-0.service - Ceph crash.compute-0 for 86651951-d7ad-54ed-8d1a-1eae0c6b599c
     Loaded: loaded (/etc/systemd/system/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-11-25 10:19:52 UTC; 3min 39s ago
   Main PID: 82982 (conmon)
         IO: 0B read, 1.1M written
      Tasks: 3 (limit: 48640)
     Memory: 12.2M (peak: 33.6M)
        CPU: 713ms
     CGroup: /system.slice/system-ceph\x2d86651951\x2dd7ad\x2d54ed\x2d8d1a\x2d1eae0c6b599c.slice/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@crash.compute-0.service
             ├─libpod-payload-b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f
             │ ├─82984 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ └─82986 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             └─runtime
               └─82982 /usr/bin/conmon --api-version 1 -c b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f -u b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f/userdata -p /run/containers/storage/overlay-containers/b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f

Nov 25 10:19:52 compute-0 systemd[1]: Started Ceph crash.compute-0 for 86651951-d7ad-54ed-8d1a-1eae0c6b599c.
Nov 25 10:19:52 compute-0 ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-crash-compute-0[82982]: INFO:ceph-crash:pinging cluster to exercise our key
Nov 25 10:19:53 compute-0 ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-crash-compute-0[82982]: 2025-11-25T10:19:53.119+0000 7f0c3bca2640 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory
Nov 25 10:19:53 compute-0 ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-crash-compute-0[82982]: 2025-11-25T10:19:53.119+0000 7f0c3bca2640 -1 AuthRegistry(0x7f0c34067cf0) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx
Nov 25 10:19:53 compute-0 ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-crash-compute-0[82982]: 2025-11-25T10:19:53.120+0000 7f0c3bca2640 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin: (2) No such file or directory
Nov 25 10:19:53 compute-0 ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-crash-compute-0[82982]: 2025-11-25T10:19:53.120+0000 7f0c3bca2640 -1 AuthRegistry(0x7f0c3bca1000) no keyring found at /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin, disabling cephx
Nov 25 10:19:53 compute-0 ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-crash-compute-0[82982]: 2025-11-25T10:19:53.121+0000 7f0c39a17640 -1 monclient(hunting): handle_auth_bad_method server allowed_methods [2] but i only support [1]
Nov 25 10:19:53 compute-0 ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-crash-compute-0[82982]: 2025-11-25T10:19:53.121+0000 7f0c3bca2640 -1 monclient: authenticate NOTE: no keyring found; disabled cephx authentication
Nov 25 10:19:53 compute-0 ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-crash-compute-0[82982]: [errno 13] RADOS permission denied (error connecting to the cluster)
Nov 25 10:19:53 compute-0 ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-crash-compute-0[82982]: INFO:ceph-crash:monitoring path /var/lib/ceph/crash, delay 600s

● ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mds.cephfs.compute-0.avwmrm.service - Ceph mds.cephfs.compute-0.avwmrm for 86651951-d7ad-54ed-8d1a-1eae0c6b599c
     Loaded: loaded (/etc/systemd/system/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-11-25 10:21:18 UTC; 2min 13s ago
    Process: 101280 ExecStartPre=/bin/rm -f /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mds.cephfs.compute-0.avwmrm.service-pid /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mds.cephfs.compute-0.avwmrm.service-cid (code=exited, status=0/SUCCESS)
    Process: 101281 ExecStart=/bin/bash /var/lib/ceph/86651951-d7ad-54ed-8d1a-1eae0c6b599c/mds.cephfs.compute-0.avwmrm/unit.run (code=exited, status=0/SUCCESS)
   Main PID: 101344 (conmon)
         IO: 0B read, 175.0K written
      Tasks: 28 (limit: 48640)
     Memory: 18.4M (peak: 20.0M)
        CPU: 606ms
     CGroup: /system.slice/system-ceph\x2d86651951\x2dd7ad\x2d54ed\x2d8d1a\x2d1eae0c6b599c.slice/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mds.cephfs.compute-0.avwmrm.service
             ├─libpod-payload-ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270
             │ ├─101346 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.avwmrm -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─101348 /usr/bin/ceph-mds -n mds.cephfs.compute-0.avwmrm -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─101344 /usr/bin/conmon --api-version 1 -c ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270 -u ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270/userdata -p /run/containers/storage/overlay-containers/ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-mds-cephfs-compute-0-avwmrm --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mds.cephfs.compute-0.avwmrm.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270

Nov 25 10:21:20 compute-0 ceph-mds[101348]: mds.0.cache creating system inode with ino:0x606
Nov 25 10:21:20 compute-0 ceph-mds[101348]: mds.0.cache creating system inode with ino:0x607
Nov 25 10:21:20 compute-0 ceph-mds[101348]: mds.0.cache creating system inode with ino:0x608
Nov 25 10:21:20 compute-0 ceph-mds[101348]: mds.0.cache creating system inode with ino:0x609
Nov 25 10:21:20 compute-0 ceph-mds[101348]: mds.0.4 creating_done
Nov 25 10:21:21 compute-0 ceph-mds[101348]: mds.cephfs.compute-0.avwmrm Updating MDS map to version 5 from mon.0
Nov 25 10:21:21 compute-0 ceph-mds[101348]: mds.0.4 handle_mds_map i am now mds.0.4
Nov 25 10:21:21 compute-0 ceph-mds[101348]: mds.0.4 handle_mds_map state change up:creating --> up:active
Nov 25 10:21:21 compute-0 ceph-mds[101348]: mds.0.4 recovery_done -- successful recovery!
Nov 25 10:21:21 compute-0 ceph-mds[101348]: mds.0.4 active_start

● ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mgr.compute-0.oomwtk.service - Ceph mgr.compute-0.oomwtk for 86651951-d7ad-54ed-8d1a-1eae0c6b599c
     Loaded: loaded (/etc/systemd/system/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-11-25 10:18:43 UTC; 4min 48s ago
   Main PID: 75452 (conmon)
         IO: 0B read, 411.5K written
      Tasks: 149 (limit: 48640)
     Memory: 510.0M (peak: 510.7M)
        CPU: 49.874s
     CGroup: /system.slice/system-ceph\x2d86651951\x2dd7ad\x2d54ed\x2d8d1a\x2d1eae0c6b599c.slice/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mgr.compute-0.oomwtk.service
             ├─libpod-payload-50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5
             │ ├─75454 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.oomwtk -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─75456 /usr/bin/ceph-mgr -n mgr.compute-0.oomwtk -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─75452 /usr/bin/conmon --api-version 1 -c 50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5 -u 50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5/userdata -p /run/containers/storage/overlay-containers/50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-mgr-compute-0-oomwtk --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mgr.compute-0.oomwtk.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5

Nov 25 10:23:28 compute-0 ceph-mgr[75456]: [rbd_support INFO root] load_schedules: volumes, start_after=
Nov 25 10:23:28 compute-0 ceph-mgr[75456]: [rbd_support INFO root] load_schedules: backups, start_after=
Nov 25 10:23:28 compute-0 ceph-mgr[75456]: [rbd_support INFO root] TrashPurgeScheduleHandler: load_schedules
Nov 25 10:23:28 compute-0 ceph-mgr[75456]: [rbd_support INFO root] load_schedules: vms, start_after=
Nov 25 10:23:28 compute-0 ceph-mgr[75456]: [rbd_support INFO root] load_schedules: volumes, start_after=
Nov 25 10:23:28 compute-0 ceph-mgr[75456]: [rbd_support INFO root] load_schedules: backups, start_after=
Nov 25 10:23:28 compute-0 ceph-mgr[75456]: [rbd_support INFO root] load_schedules: images, start_after=
Nov 25 10:23:28 compute-0 ceph-mgr[75456]: [rbd_support INFO root] load_schedules: images, start_after=
Nov 25 10:23:30 compute-0 ceph-mgr[75456]: log_channel(cluster) log [DBG] : pgmap v227: 305 pgs: 305 active+clean; 455 KiB data, 140 MiB used, 60 GiB / 60 GiB avail; 18 B/s, 1 objects/s recovering
Nov 25 10:23:32 compute-0 ceph-mgr[75456]: log_channel(cluster) log [DBG] : pgmap v230: 305 pgs: 305 active+clean; 455 KiB data, 140 MiB used, 60 GiB / 60 GiB avail

● ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mon.compute-0.service - Ceph mon.compute-0 for 86651951-d7ad-54ed-8d1a-1eae0c6b599c
     Loaded: loaded (/etc/systemd/system/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-11-25 10:18:40 UTC; 4min 51s ago
   Main PID: 75162 (conmon)
         IO: 0B read, 22.1M written
      Tasks: 26 (limit: 48640)
     Memory: 52.9M (peak: 58.7M)
        CPU: 5.437s
     CGroup: /system.slice/system-ceph\x2d86651951\x2dd7ad\x2d54ed\x2d8d1a\x2d1eae0c6b599c.slice/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mon.compute-0.service
             ├─libpod-payload-da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5
             │ ├─75164 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─75166 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             └─runtime
               └─75162 /usr/bin/conmon --api-version 1 -c da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5 -u da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5/userdata -p /run/containers/storage/overlay-containers/da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5

Nov 25 10:23:31 compute-0 ceph-mon[75166]: mon.compute-0@0(leader).osd e117 e117: 3 total, 3 up, 3 in
Nov 25 10:23:31 compute-0 ceph-mon[75166]: log_channel(cluster) log [DBG] : osdmap e117: 3 total, 3 up, 3 in
Nov 25 10:23:31 compute-0 ceph-mon[75166]: pgmap v227: 305 pgs: 305 active+clean; 455 KiB data, 140 MiB used, 60 GiB / 60 GiB avail; 18 B/s, 1 objects/s recovering
Nov 25 10:23:31 compute-0 ceph-mon[75166]: from='mgr.14130 192.168.122.100:0/1840679491' entity='mgr.compute-0.oomwtk' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "30"}]': finished
Nov 25 10:23:31 compute-0 ceph-mon[75166]: osdmap e116: 3 total, 3 up, 3 in
Nov 25 10:23:31 compute-0 ceph-mon[75166]: 9.10 deep-scrub starts
Nov 25 10:23:31 compute-0 ceph-mon[75166]: 9.10 deep-scrub ok
Nov 25 10:23:31 compute-0 ceph-mon[75166]: osdmap e117: 3 total, 3 up, 3 in
Nov 25 10:23:32 compute-0 ceph-mon[75166]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "31"} v 0) v1
Nov 25 10:23:32 compute-0 ceph-mon[75166]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/1840679491' entity='mgr.compute-0.oomwtk' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "31"}]: dispatch

● ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.0.service - Ceph osd.0 for 86651951-d7ad-54ed-8d1a-1eae0c6b599c
     Loaded: loaded (/etc/systemd/system/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-11-25 10:20:19 UTC; 3min 12s ago
   Main PID: 88856 (conmon)
         IO: 4.8M read, 1.2G written
      Tasks: 60 (limit: 48640)
     Memory: 489.3M (peak: 508.1M)
        CPU: 3.439s
     CGroup: /system.slice/system-ceph\x2d86651951\x2dd7ad\x2d54ed\x2d8d1a\x2d1eae0c6b599c.slice/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.0.service
             ├─libpod-payload-509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c
             │ ├─88858 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─88860 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─88856 /usr/bin/conmon --api-version 1 -c 509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c -u 509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c/userdata -p /run/containers/storage/overlay-containers/509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c

Nov 25 10:23:22 compute-0 ceph-osd[88860]: osd.0 pg_epoch: 111 pg[9.19( v 45'385 (0'0,45'385] local-lis/les=110/111 n=5 ec=50/35 lis/c=58/58 les/c/f=59/59/0 sis=110) [2]/[0] async=[2] r=0 lpr=110 pi=[58,110)/1 crt=45'385 mlcod 0'0 active+remapped mbc={255={(0+1)=7}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Nov 25 10:23:23 compute-0 ceph-osd[88860]: osd.0 pg_epoch: 112 pg[9.19( v 45'385 (0'0,45'385] local-lis/les=110/111 n=5 ec=50/35 lis/c=110/58 les/c/f=111/59/0 sis=112 pruub=15.005048752s) [2] async=[2] r=-1 lpr=112 pi=[58,112)/1 crt=45'385 mlcod 45'385 active pruub 197.138351440s@ mbc={255={}}] start_peering_interval up [2] -> [2], acting [0] -> [2], acting_primary 0 -> 2, up_primary 2 -> 2, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Nov 25 10:23:23 compute-0 ceph-osd[88860]: osd.0 pg_epoch: 112 pg[9.19( v 45'385 (0'0,45'385] local-lis/les=110/111 n=5 ec=50/35 lis/c=110/58 les/c/f=111/59/0 sis=112 pruub=15.004802704s) [2] r=-1 lpr=112 pi=[58,112)/1 crt=45'385 mlcod 0'0 unknown NOTIFY pruub 197.138351440s@ mbc={}] state<Start>: transitioning to Stray
Nov 25 10:23:29 compute-0 ceph-osd[88860]: osd.0 pg_epoch: 114 pg[9.1c( empty local-lis/les=0/0 n=0 ec=50/35 lis/c=85/85 les/c/f=86/86/0 sis=114) [0] r=0 lpr=114 pi=[85,114)/1 crt=0'0 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Nov 25 10:23:29 compute-0 ceph-osd[88860]: osd.0 pg_epoch: 115 pg[9.1c( empty local-lis/les=0/0 n=0 ec=50/35 lis/c=85/85 les/c/f=86/86/0 sis=115) [0]/[2] r=-1 lpr=115 pi=[85,115)/1 crt=0'0 mlcod 0'0 remapped mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Nov 25 10:23:29 compute-0 ceph-osd[88860]: osd.0 pg_epoch: 115 pg[9.1c( empty local-lis/les=0/0 n=0 ec=50/35 lis/c=85/85 les/c/f=86/86/0 sis=115) [0]/[2] r=-1 lpr=115 pi=[85,115)/1 crt=0'0 mlcod 0'0 remapped NOTIFY mbc={}] state<Start>: transitioning to Stray
Nov 25 10:23:31 compute-0 ceph-osd[88860]: osd.0 pg_epoch: 117 pg[9.1c( v 45'385 (0'0,45'385] local-lis/les=0/0 n=5 ec=50/35 lis/c=115/85 les/c/f=116/86/0 sis=117) [0] r=0 lpr=117 pi=[85,117)/1 luod=0'0 crt=45'385 mlcod 0'0 active mbc={}] start_peering_interval up [0] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Nov 25 10:23:31 compute-0 ceph-osd[88860]: osd.0 pg_epoch: 117 pg[9.1c( v 45'385 (0'0,45'385] local-lis/les=0/0 n=5 ec=50/35 lis/c=115/85 les/c/f=116/86/0 sis=117) [0] r=0 lpr=117 pi=[85,117)/1 crt=45'385 mlcod 0'0 unknown mbc={}] state<Start>: transitioning to Primary
Nov 25 10:23:32 compute-0 ceph-osd[88860]: log_channel(cluster) log [DBG] : 10.9 scrub starts
Nov 25 10:23:32 compute-0 ceph-osd[88860]: log_channel(cluster) log [DBG] : 10.9 scrub ok

● ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.1.service - Ceph osd.1 for 86651951-d7ad-54ed-8d1a-1eae0c6b599c
     Loaded: loaded (/etc/systemd/system/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-11-25 10:20:24 UTC; 3min 7s ago
   Main PID: 89877 (conmon)
         IO: 4.7M read, 1.2G written
      Tasks: 60 (limit: 48640)
     Memory: 496.8M (peak: 517.3M)
        CPU: 6.258s
     CGroup: /system.slice/system-ceph\x2d86651951\x2dd7ad\x2d54ed\x2d8d1a\x2d1eae0c6b599c.slice/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.1.service
             ├─libpod-payload-53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3
             │ ├─89879 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─89881 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─89877 /usr/bin/conmon --api-version 1 -c 53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3 -u 53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3/userdata -p /run/containers/storage/overlay-containers/53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3

Nov 25 10:23:23 compute-0 ceph-osd[89881]: log_channel(cluster) log [DBG] : 11.1d scrub starts
Nov 25 10:23:23 compute-0 ceph-osd[89881]: log_channel(cluster) log [DBG] : 11.1d scrub ok
Nov 25 10:23:27 compute-0 ceph-osd[89881]: log_channel(cluster) log [DBG] : 9.4 deep-scrub starts
Nov 25 10:23:27 compute-0 ceph-osd[89881]: log_channel(cluster) log [DBG] : 9.4 deep-scrub ok
Nov 25 10:23:28 compute-0 ceph-osd[89881]: log_channel(cluster) log [DBG] : 8.1e scrub starts
Nov 25 10:23:28 compute-0 ceph-osd[89881]: log_channel(cluster) log [DBG] : 8.1e scrub ok
Nov 25 10:23:29 compute-0 ceph-osd[89881]: log_channel(cluster) log [DBG] : 9.a scrub starts
Nov 25 10:23:29 compute-0 ceph-osd[89881]: log_channel(cluster) log [DBG] : 9.a scrub ok
Nov 25 10:23:30 compute-0 ceph-osd[89881]: log_channel(cluster) log [DBG] : 9.10 deep-scrub starts
Nov 25 10:23:30 compute-0 ceph-osd[89881]: log_channel(cluster) log [DBG] : 9.10 deep-scrub ok

● ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.2.service - Ceph osd.2 for 86651951-d7ad-54ed-8d1a-1eae0c6b599c
     Loaded: loaded (/etc/systemd/system/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-11-25 10:20:29 UTC; 3min 2s ago
   Main PID: 90885 (conmon)
         IO: 4.7M read, 1.2G written
      Tasks: 60 (limit: 48640)
     Memory: 489.7M (peak: 508.4M)
        CPU: 3.052s
     CGroup: /system.slice/system-ceph\x2d86651951\x2dd7ad\x2d54ed\x2d8d1a\x2d1eae0c6b599c.slice/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.2.service
             ├─libpod-payload-a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181
             │ ├─90887 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─90889 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─90885 /usr/bin/conmon --api-version 1 -c a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181 -u a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181/userdata -p /run/containers/storage/overlay-containers/a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181

Nov 25 10:23:28 compute-0 ceph-osd[90889]: log_channel(cluster) log [DBG] : 8.15 scrub ok
Nov 25 10:23:29 compute-0 ceph-osd[90889]: osd.2 pg_epoch: 114 pg[9.1c( v 45'385 (0'0,45'385] local-lis/les=85/86 n=5 ec=50/35 lis/c=85/85 les/c/f=86/86/0 sis=114 pruub=15.446064949s) [0] r=-1 lpr=114 pi=[85,114)/1 crt=45'385 mlcod 0'0 active pruub 193.480224609s@ mbc={}] start_peering_interval up [2] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 2 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Nov 25 10:23:29 compute-0 ceph-osd[90889]: osd.2 pg_epoch: 114 pg[9.1c( v 45'385 (0'0,45'385] local-lis/les=85/86 n=5 ec=50/35 lis/c=85/85 les/c/f=86/86/0 sis=114 pruub=15.445990562s) [0] r=-1 lpr=114 pi=[85,114)/1 crt=45'385 mlcod 0'0 unknown NOTIFY pruub 193.480224609s@ mbc={}] state<Start>: transitioning to Stray
Nov 25 10:23:29 compute-0 ceph-osd[90889]: osd.2 pg_epoch: 115 pg[9.1c( v 45'385 (0'0,45'385] local-lis/les=85/86 n=5 ec=50/35 lis/c=85/85 les/c/f=86/86/0 sis=115) [0]/[2] r=0 lpr=115 pi=[85,115)/1 crt=45'385 mlcod 0'0 remapped NOTIFY mbc={}] start_peering_interval up [0] -> [0], acting [0] -> [2], acting_primary 0 -> 2, up_primary 0 -> 0, role -1 -> 0, features acting 4540138322906710015 upacting 4540138322906710015
Nov 25 10:23:29 compute-0 ceph-osd[90889]: osd.2 pg_epoch: 115 pg[9.1c( v 45'385 (0'0,45'385] local-lis/les=85/86 n=5 ec=50/35 lis/c=85/85 les/c/f=86/86/0 sis=115) [0]/[2] r=0 lpr=115 pi=[85,115)/1 crt=45'385 mlcod 0'0 remapped mbc={}] state<Start>: transitioning to Primary
Nov 25 10:23:29 compute-0 ceph-osd[90889]: log_channel(cluster) log [DBG] : 7.1a scrub starts
Nov 25 10:23:29 compute-0 ceph-osd[90889]: log_channel(cluster) log [DBG] : 7.1a scrub ok
Nov 25 10:23:30 compute-0 ceph-osd[90889]: osd.2 pg_epoch: 116 pg[9.1c( v 45'385 (0'0,45'385] local-lis/les=115/116 n=5 ec=50/35 lis/c=85/85 les/c/f=86/86/0 sis=115) [0]/[2] async=[0] r=0 lpr=115 pi=[85,115)/1 crt=45'385 mlcod 0'0 active+remapped mbc={255={(0+1)=7}}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete
Nov 25 10:23:31 compute-0 ceph-osd[90889]: osd.2 pg_epoch: 117 pg[9.1c( v 45'385 (0'0,45'385] local-lis/les=115/116 n=5 ec=50/35 lis/c=115/85 les/c/f=116/86/0 sis=117 pruub=15.512598991s) [0] async=[0] r=-1 lpr=117 pi=[85,117)/1 crt=45'385 mlcod 45'385 active pruub 195.751464844s@ mbc={255={}}] start_peering_interval up [0] -> [0], acting [2] -> [0], acting_primary 2 -> 0, up_primary 0 -> 0, role 0 -> -1, features acting 4540138322906710015 upacting 4540138322906710015
Nov 25 10:23:31 compute-0 ceph-osd[90889]: osd.2 pg_epoch: 117 pg[9.1c( v 45'385 (0'0,45'385] local-lis/les=115/116 n=5 ec=50/35 lis/c=115/85 les/c/f=116/86/0 sis=117 pruub=15.512359619s) [0] r=-1 lpr=117 pi=[85,117)/1 crt=45'385 mlcod 0'0 unknown NOTIFY pruub 195.751464844s@ mbc={}] state<Start>: transitioning to Stray

● ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@rgw.rgw.compute-0.nmzhvu.service - Ceph rgw.rgw.compute-0.nmzhvu for 86651951-d7ad-54ed-8d1a-1eae0c6b599c
     Loaded: loaded (/etc/systemd/system/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@.service; enabled; preset: disabled)
     Active: active (running) since Tue 2025-11-25 10:21:16 UTC; 2min 15s ago
   Main PID: 100882 (conmon)
         IO: 0B read, 199.5K written
      Tasks: 605 (limit: 48640)
     Memory: 89.5M (peak: 90.3M)
        CPU: 1.104s
     CGroup: /system.slice/system-ceph\x2d86651951\x2dd7ad\x2d54ed\x2d8d1a\x2d1eae0c6b599c.slice/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@rgw.rgw.compute-0.nmzhvu.service
             ├─libpod-payload-37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6
             │ ├─100884 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.nmzhvu -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─100886 /usr/bin/radosgw -n client.rgw.rgw.compute-0.nmzhvu -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             └─runtime
               └─100882 /usr/bin/conmon --api-version 1 -c 37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6 -u 37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6/userdata -p /run/containers/storage/overlay-containers/37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-rgw-rgw-compute-0-nmzhvu --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@rgw.rgw.compute-0.nmzhvu.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6

Nov 25 10:21:16 compute-0 radosgw[100886]: framework conf key: endpoint, val: 192.168.122.100:8082
Nov 25 10:21:16 compute-0 radosgw[100886]: init_numa not setting numa affinity
Nov 25 10:21:28 compute-0 radosgw[100886]: LDAP not started since no server URIs were provided in the configuration.
Nov 25 10:21:28 compute-0 ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-rgw-rgw-compute-0-nmzhvu[100882]: 2025-11-25T10:21:28.102+0000 7f102d4a1940 -1 LDAP not started since no server URIs were provided in the configuration.
Nov 25 10:21:28 compute-0 radosgw[100886]: framework: beast
Nov 25 10:21:28 compute-0 radosgw[100886]: framework conf key: ssl_certificate, val: config://rgw/cert/$realm/$zone.crt
Nov 25 10:21:28 compute-0 radosgw[100886]: framework conf key: ssl_private_key, val: config://rgw/cert/$realm/$zone.key
Nov 25 10:21:28 compute-0 radosgw[100886]: starting handler: beast
Nov 25 10:21:28 compute-0 radosgw[100886]: set uid:gid to 167:167 (ceph:ceph)
Nov 25 10:21:28 compute-0 radosgw[100886]: mgrc service_daemon_register rgw.14267 metadata {arch=x86_64,ceph_release=reef,ceph_version=ceph version 18.2.7 (6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad) reef (stable),ceph_version_short=18.2.7,container_hostname=compute-0,container_image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0,cpu=AMD EPYC-Rome Processor,distro=centos,distro_description=CentOS Stream 9,distro_version=9,frontend_config#0=beast endpoint=192.168.122.100:8082,frontend_type#0=beast,hostname=compute-0,id=rgw.compute-0.nmzhvu,kernel_description=#1 SMP PREEMPT_DYNAMIC Thu Nov 20 14:15:03 UTC 2025,kernel_version=5.14.0-642.el9.x86_64,mem_swap_kb=1048572,mem_total_kb=7864320,num_handles=1,os=Linux,pid=2,realm_id=,realm_name=,zone_id=e06fe2d3-3f79-471b-8522-fa386149d462,zone_name=default,zonegroup_id=f3dc1b8d-076f-4ab7-8bd0-d56d8d8e7c4c,zonegroup_name=default}

● ceph-osd-losetup-0.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-0.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-11-25 10:17:51 UTC; 5min ago
   Main PID: 72590 (code=exited, status=0/SUCCESS)
        CPU: 24ms

Nov 25 10:17:51 compute-0 systemd[1]: Starting Ceph OSD losetup...
Nov 25 10:17:51 compute-0 bash[72591]: /dev/loop3: [64513]:4327754 (/var/lib/ceph-osd-0.img)
Nov 25 10:17:51 compute-0 systemd[1]: Finished Ceph OSD losetup.

● ceph-osd-losetup-1.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-1.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-11-25 10:17:55 UTC; 5min ago
   Main PID: 72960 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Nov 25 10:17:55 compute-0 systemd[1]: Starting Ceph OSD losetup...
Nov 25 10:17:55 compute-0 bash[72961]: /dev/loop4: [64513]:4327909 (/var/lib/ceph-osd-1.img)
Nov 25 10:17:55 compute-0 systemd[1]: Finished Ceph OSD losetup.

● ceph-osd-losetup-2.service - Ceph OSD losetup
     Loaded: loaded (/etc/systemd/system/ceph-osd-losetup-2.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-11-25 10:18:01 UTC; 5min ago
   Main PID: 73332 (code=exited, status=0/SUCCESS)
        CPU: 19ms

Nov 25 10:18:01 compute-0 systemd[1]: Starting Ceph OSD losetup...
Nov 25 10:18:01 compute-0 bash[73333]: /dev/loop5: [64513]:4327912 (/var/lib/ceph-osd-2.img)
Nov 25 10:18:01 compute-0 systemd[1]: Finished Ceph OSD losetup.

● chronyd.service - NTP client/server
     Loaded: loaded (/usr/lib/systemd/system/chronyd.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-11-25 10:15:56 UTC; 7min ago
       Docs: man:chronyd(8)
             man:chrony.conf(5)
   Main PID: 58618 (chronyd)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48640)
     Memory: 1004.0K (peak: 1.9M)
        CPU: 73ms
     CGroup: /system.slice/chronyd.service
             └─58618 /usr/sbin/chronyd -F 2

Nov 25 10:15:56 compute-0 systemd[1]: Starting NTP client/server...
Nov 25 10:15:56 compute-0 chronyd[58618]: chronyd version 4.8 starting (+CMDMON +REFCLOCK +RTC +PRIVDROP +SCFILTER +SIGND +NTS +SECHASH +IPV6 +DEBUG)
Nov 25 10:15:56 compute-0 chronyd[58618]: Frequency -24.853 +/- 1.649 ppm read from /var/lib/chrony/drift
Nov 25 10:15:56 compute-0 chronyd[58618]: Loaded seccomp filter (level 2)
Nov 25 10:15:56 compute-0 systemd[1]: Started NTP client/server.
Nov 25 10:18:05 compute-0 chronyd[58618]: Selected source 144.217.4.129 (pool.ntp.org)

● cloud-config.service - Cloud-init: Config Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-11-25 09:44:04 UTC; 39min ago
   Main PID: 1005 (code=exited, status=0/SUCCESS)
        CPU: 443ms

Nov 25 09:44:04 np0005534776.novalocal systemd[1]: Starting Cloud-init: Config Stage...
Nov 25 09:44:04 np0005534776.novalocal cloud-init[1125]: Cloud-init v. 24.4-7.el9 running 'modules:config' at Tue, 25 Nov 2025 09:44:04 +0000. Up 15.29 seconds.
Nov 25 09:44:04 np0005534776.novalocal systemd[1]: Finished Cloud-init: Config Stage.

● cloud-final.service - Cloud-init: Final Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-final.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-11-25 09:44:05 UTC; 39min ago
   Main PID: 1164 (code=exited, status=0/SUCCESS)
        CPU: 553ms

Nov 25 09:44:04 np0005534776.novalocal systemd[1]: Starting Cloud-init: Final Stage...
Nov 25 09:44:05 np0005534776.novalocal cloud-init[1297]: Cloud-init v. 24.4-7.el9 running 'modules:final' at Tue, 25 Nov 2025 09:44:05 +0000. Up 15.76 seconds.
Nov 25 09:44:05 np0005534776.novalocal cloud-init[1307]: -----BEGIN SSH HOST KEY FINGERPRINTS-----
Nov 25 09:44:05 np0005534776.novalocal cloud-init[1309]: 256 SHA256:OauK4qta5wR2jed9Bai6V/CZBweuv2YHD7zp9idyMPQ root@np0005534776.novalocal (ECDSA)
Nov 25 09:44:05 np0005534776.novalocal cloud-init[1311]: 256 SHA256:HG8YJXe8G4WtnuyYB5LZvCal2BBhVaYX1EhpuzWTgkM root@np0005534776.novalocal (ED25519)
Nov 25 09:44:05 np0005534776.novalocal cloud-init[1313]: 3072 SHA256:cmLzSQBIV39pbDcixy4LAyf2H8gZbfOBc5RrPhFko2U root@np0005534776.novalocal (RSA)
Nov 25 09:44:05 np0005534776.novalocal cloud-init[1314]: -----END SSH HOST KEY FINGERPRINTS-----
Nov 25 09:44:05 np0005534776.novalocal cloud-init[1315]: #############################################################
Nov 25 09:44:05 np0005534776.novalocal cloud-init[1297]: Cloud-init v. 24.4-7.el9 finished at Tue, 25 Nov 2025 09:44:05 +0000. Datasource DataSourceConfigDrive [net,ver=2][source=/dev/sr0].  Up 15.96 seconds
Nov 25 09:44:05 np0005534776.novalocal systemd[1]: Finished Cloud-init: Final Stage.

● cloud-init-local.service - Cloud-init: Local Stage (pre-network)
     Loaded: loaded (/usr/lib/systemd/system/cloud-init-local.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-11-25 09:43:59 UTC; 39min ago
   Main PID: 813 (code=exited, status=0/SUCCESS)
        CPU: 853ms

Nov 25 09:43:56 localhost systemd[1]: Starting Cloud-init: Local Stage (pre-network)...
Nov 25 09:43:58 localhost cloud-init[838]: Cloud-init v. 24.4-7.el9 running 'init-local' at Tue, 25 Nov 2025 09:43:58 +0000. Up 9.47 seconds.
Nov 25 09:43:59 np0005534776.novalocal systemd[1]: Finished Cloud-init: Local Stage (pre-network).

● cloud-init.service - Cloud-init: Network Stage
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-11-25 09:44:04 UTC; 39min ago
   Main PID: 904 (code=exited, status=0/SUCCESS)
        CPU: 1.324s

Nov 25 09:44:04 np0005534776.novalocal cloud-init[924]: |      o E*+o+ o  |
Nov 25 09:44:04 np0005534776.novalocal cloud-init[924]: |     . o+o.. =   |
Nov 25 09:44:04 np0005534776.novalocal cloud-init[924]: |      ..o*o O    |
Nov 25 09:44:04 np0005534776.novalocal cloud-init[924]: |       .S*o* *   |
Nov 25 09:44:04 np0005534776.novalocal cloud-init[924]: |      . +.* =    |
Nov 25 09:44:04 np0005534776.novalocal cloud-init[924]: |       + + *     |
Nov 25 09:44:04 np0005534776.novalocal cloud-init[924]: |      . + = o    |
Nov 25 09:44:04 np0005534776.novalocal cloud-init[924]: |         o .     |
Nov 25 09:44:04 np0005534776.novalocal cloud-init[924]: +----[SHA256]-----+
Nov 25 09:44:04 np0005534776.novalocal systemd[1]: Finished Cloud-init: Network Stage.

○ cpupower.service - Configure CPU power related settings
     Loaded: loaded (/usr/lib/systemd/system/cpupower.service; disabled; preset: disabled)
     Active: inactive (dead)

● crond.service - Command Scheduler
     Loaded: loaded (/usr/lib/systemd/system/crond.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-11-25 09:44:04 UTC; 39min ago
   Main PID: 1012 (crond)
         IO: 160.0K read, 0B written
      Tasks: 2 (limit: 48640)
     Memory: 1.5M (peak: 5.1M)
        CPU: 85ms
     CGroup: /system.slice/crond.service
             ├─ 1012 /usr/sbin/crond -n
             └─30899 /usr/sbin/anacron -s

Nov 25 09:44:04 np0005534776.novalocal crond[1012]: (CRON) INFO (RANDOM_DELAY will be scaled with factor 6% if used.)
Nov 25 09:44:04 np0005534776.novalocal crond[1012]: (CRON) INFO (running with inotify support)
Nov 25 10:01:01 compute-0 CROND[30888]: (root) CMD (run-parts /etc/cron.hourly)
Nov 25 10:01:01 compute-0 anacron[30899]: Anacron started on 2025-11-25
Nov 25 10:01:01 compute-0 anacron[30899]: Will run job `cron.daily' in 43 min.
Nov 25 10:01:01 compute-0 anacron[30899]: Will run job `cron.weekly' in 63 min.
Nov 25 10:01:01 compute-0 anacron[30899]: Will run job `cron.monthly' in 83 min.
Nov 25 10:01:01 compute-0 anacron[30899]: Jobs will be executed sequentially
Nov 25 10:01:01 compute-0 run-parts[30901]: (/etc/cron.hourly) finished 0anacron
Nov 25 10:01:01 compute-0 CROND[30887]: (root) CMDEND (run-parts /etc/cron.hourly)

● dbus-broker.service - D-Bus System Message Bus
     Loaded: loaded (/usr/lib/systemd/system/dbus-broker.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-11-25 09:43:56 UTC; 39min ago
TriggeredBy: ● dbus.socket
       Docs: man:dbus-broker-launch(1)
   Main PID: 810 (dbus-broker-lau)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48640)
     Memory: 2.9M (peak: 3.4M)
        CPU: 2.726s
     CGroup: /system.slice/dbus-broker.service
             ├─810 /usr/bin/dbus-broker-launch --scope system --audit
             └─811 dbus-broker --log 4 --controller 9 --machine-id fee38d0f94bf6f4b17ec77ba536bd6ab --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit

Unit display-manager.service could not be found.
Nov 25 09:43:56 localhost dbus-broker-lau[810]: Ready
Nov 25 09:54:04 np0005534776.novalocal dbus-broker-launch[811]: avc:  op=load_policy lsm=selinux seqno=6 res=1
Nov 25 10:11:54 compute-0 dbus-broker-launch[810]: Noticed file-system modification, trigger reload.
Nov 25 10:11:54 compute-0 dbus-broker-launch[810]: Noticed file-system modification, trigger reload.
Nov 25 10:11:54 compute-0 dbus-broker-launch[810]: Noticed file-system modification, trigger reload.
Nov 25 10:12:57 compute-0 dbus-broker-launch[811]: avc:  op=load_policy lsm=selinux seqno=8 res=1
Nov 25 10:13:34 compute-0 dbus-broker-launch[810]: Noticed file-system modification, trigger reload.
Nov 25 10:13:34 compute-0 dbus-broker-launch[810]: Noticed file-system modification, trigger reload.
Nov 25 10:14:20 compute-0 dbus-broker-launch[811]: avc:  op=load_policy lsm=selinux seqno=9 res=1
Nov 25 10:14:30 compute-0 dbus-broker-launch[811]: avc:  op=load_policy lsm=selinux seqno=10 res=1

○ dm-event.service - Device-mapper event daemon
     Loaded: loaded (/usr/lib/systemd/system/dm-event.service; static)
     Active: inactive (dead)
TriggeredBy: ● dm-event.socket
       Docs: man:dmeventd(8)

○ dnf-makecache.service - dnf makecache
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.service; static)
     Active: inactive (dead) since Tue 2025-11-25 10:11:56 UTC; 11min ago
TriggeredBy: ● dnf-makecache.timer
   Main PID: 34090 (code=exited, status=0/SUCCESS)
        CPU: 1.930s

Nov 25 10:11:55 compute-0 dnf[34090]: NFV SIG OpenvSwitch                             119 kB/s | 3.0 kB     00:00
Nov 25 10:11:55 compute-0 dnf[34090]: repo-setup-centos-appstream                     157 kB/s | 4.4 kB     00:00
Nov 25 10:11:55 compute-0 dnf[34090]: repo-setup-centos-baseos                        103 kB/s | 3.9 kB     00:00
Nov 25 10:11:55 compute-0 dnf[34090]: repo-setup-centos-highavailability              159 kB/s | 3.9 kB     00:00
Nov 25 10:11:55 compute-0 dnf[34090]: repo-setup-centos-powertools                    180 kB/s | 4.3 kB     00:00
Nov 25 10:11:56 compute-0 dnf[34090]: Extra Packages for Enterprise Linux 9 - x86_64   94 kB/s |  31 kB     00:00
Nov 25 10:11:56 compute-0 dnf[34090]: Metadata cache created.
Nov 25 10:11:56 compute-0 systemd[1]: dnf-makecache.service: Deactivated successfully.
Nov 25 10:11:56 compute-0 systemd[1]: Finished dnf makecache.
Nov 25 10:11:56 compute-0 systemd[1]: dnf-makecache.service: Consumed 1.930s CPU time.

○ dracut-cmdline.service - dracut cmdline hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-cmdline.service; static)
     Active: inactive (dead) since Tue 2025-11-25 09:43:53 UTC; 39min ago
   Duration: 2.152s
       Docs: man:dracut-cmdline.service(8)
             man:dracut.bootup(7)
   Main PID: 327 (code=exited, status=0/SUCCESS)
        CPU: 135ms

Nov 25 09:43:51 localhost systemd[1]: Starting dracut cmdline hook...
Nov 25 09:43:51 localhost dracut-cmdline[327]: dracut-9 dracut-057-102.git20250818.el9
Nov 25 09:43:51 localhost dracut-cmdline[327]: Using kernel command line parameters:    BOOT_IMAGE=(hd0,msdos1)/boot/vmlinuz-5.14.0-642.el9.x86_64 root=UUID=47e3724e-7a1b-439a-9543-b98c9a290709 ro console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=1G-2G:192M,2G-64G:256M,64G-:512M
Nov 25 09:43:51 localhost systemd[1]: Finished dracut cmdline hook.
Nov 25 09:43:53 localhost systemd[1]: dracut-cmdline.service: Deactivated successfully.
Nov 25 09:43:53 localhost systemd[1]: Stopped dracut cmdline hook.

○ dracut-initqueue.service - dracut initqueue hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-initqueue.service; static)
     Active: inactive (dead) since Tue 2025-11-25 09:43:53 UTC; 39min ago
   Duration: 1.278s
       Docs: man:dracut-initqueue.service(8)
             man:dracut.bootup(7)
   Main PID: 498 (code=exited, status=0/SUCCESS)
        CPU: 34ms

Nov 25 09:43:51 localhost systemd[1]: Starting dracut initqueue hook...
Nov 25 09:43:52 localhost systemd[1]: Finished dracut initqueue hook.
Nov 25 09:43:53 localhost systemd[1]: dracut-initqueue.service: Deactivated successfully.
Nov 25 09:43:53 localhost systemd[1]: Stopped dracut initqueue hook.

○ dracut-mount.service - dracut mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-mount.service; static)
     Active: inactive (dead) since Tue 2025-11-25 09:43:53 UTC; 39min ago
   Duration: 191ms
       Docs: man:dracut-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 571 (code=exited, status=0/SUCCESS)
        CPU: 16ms

Nov 25 09:43:53 localhost systemd[1]: Starting dracut mount hook...
Nov 25 09:43:53 localhost systemd[1]: Finished dracut mount hook.
Nov 25 09:43:53 localhost systemd[1]: dracut-mount.service: Deactivated successfully.
Nov 25 09:43:53 localhost systemd[1]: Stopped dracut mount hook.

○ dracut-pre-mount.service - dracut pre-mount hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-mount.service; static)
     Active: inactive (dead) since Tue 2025-11-25 09:43:53 UTC; 39min ago
   Duration: 1.243s
       Docs: man:dracut-pre-mount.service(8)
             man:dracut.bootup(7)
   Main PID: 548 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Nov 25 09:43:52 localhost systemd[1]: Starting dracut pre-mount hook...
Nov 25 09:43:52 localhost systemd[1]: Finished dracut pre-mount hook.
Nov 25 09:43:53 localhost systemd[1]: dracut-pre-mount.service: Deactivated successfully.
Nov 25 09:43:53 localhost systemd[1]: Stopped dracut pre-mount hook.

○ dracut-pre-pivot.service - dracut pre-pivot and cleanup hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-pivot.service; static)
     Active: inactive (dead) since Tue 2025-11-25 09:43:53 UTC; 39min ago
   Duration: 32ms
       Docs: man:dracut-pre-pivot.service(8)
             man:dracut.bootup(7)
   Main PID: 576 (code=exited, status=0/SUCCESS)
        CPU: 107ms

Nov 25 09:43:53 localhost systemd[1]: Starting dracut pre-pivot and cleanup hook...
Nov 25 09:43:53 localhost systemd[1]: Finished dracut pre-pivot and cleanup hook.
Nov 25 09:43:53 localhost systemd[1]: dracut-pre-pivot.service: Deactivated successfully.
Nov 25 09:43:53 localhost systemd[1]: Stopped dracut pre-pivot and cleanup hook.

○ dracut-pre-trigger.service - dracut pre-trigger hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-trigger.service; static)
     Active: inactive (dead) since Tue 2025-11-25 09:43:53 UTC; 39min ago
   Duration: 1.790s
       Docs: man:dracut-pre-trigger.service(8)
             man:dracut.bootup(7)
   Main PID: 466 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Nov 25 09:43:51 localhost systemd[1]: Starting dracut pre-trigger hook...
Nov 25 09:43:51 localhost systemd[1]: Finished dracut pre-trigger hook.
Nov 25 09:43:53 localhost systemd[1]: dracut-pre-trigger.service: Deactivated successfully.
Nov 25 09:43:53 localhost systemd[1]: Stopped dracut pre-trigger hook.

○ dracut-pre-udev.service - dracut pre-udev hook
     Loaded: loaded (/usr/lib/systemd/system/dracut-pre-udev.service; static)
     Active: inactive (dead) since Tue 2025-11-25 09:43:53 UTC; 39min ago
   Duration: 1.892s
       Docs: man:dracut-pre-udev.service(8)
             man:dracut.bootup(7)
   Main PID: 415 (code=exited, status=0/SUCCESS)
        CPU: 257ms

Nov 25 09:43:51 localhost systemd[1]: Starting dracut pre-udev hook...
Nov 25 09:43:51 localhost rpc.statd[442]: Version 2.5.4 starting
Nov 25 09:43:51 localhost rpc.statd[442]: Initializing NSM state
Nov 25 09:43:51 localhost rpc.idmapd[447]: Setting log level to 0
Nov 25 09:43:51 localhost systemd[1]: Finished dracut pre-udev hook.
Nov 25 09:43:53 localhost rpc.idmapd[447]: exiting on signal 15
Nov 25 09:43:53 localhost systemd[1]: dracut-pre-udev.service: Deactivated successfully.
Nov 25 09:43:53 localhost systemd[1]: Stopped dracut pre-udev hook.

○ dracut-shutdown-onfailure.service - Service executing upon dracut-shutdown failure to perform cleanup
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown-onfailure.service; static)
     Active: inactive (dead)
       Docs: man:dracut-shutdown.service(8)

● dracut-shutdown.service - Restore /run/initramfs on shutdown
Unit hv_kvp_daemon.service could not be found.
     Loaded: loaded (/usr/lib/systemd/system/dracut-shutdown.service; static)
     Active: active (exited) since Tue 2025-11-25 09:43:56 UTC; 39min ago
       Docs: man:dracut-shutdown.service(8)
   Main PID: 814 (code=exited, status=0/SUCCESS)
        CPU: 4ms

Nov 25 09:43:56 localhost systemd[1]: Starting Restore /run/initramfs on shutdown...
Nov 25 09:43:56 localhost systemd[1]: Finished Restore /run/initramfs on shutdown.

● edpm-container-shutdown.service - EDPM Container Shutdown
     Loaded: loaded (/etc/systemd/system/edpm-container-shutdown.service; enabled; preset: enabled)
     Active: active (exited) since Tue 2025-11-25 10:16:23 UTC; 7min ago
       Docs: https://github.com/openstack-k8s-operators/docs
   Main PID: 61615 (code=exited, status=0/SUCCESS)
        CPU: 3ms

Nov 25 10:16:23 compute-0 systemd[1]: Starting EDPM Container Shutdown...
Nov 25 10:16:23 compute-0 systemd[1]: Finished EDPM Container Shutdown.

○ emergency.service - Emergency Shell
     Loaded: loaded (/usr/lib/systemd/system/emergency.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

● getty@tty1.service - Getty on tty1
     Loaded: loaded (/usr/lib/systemd/system/getty@.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-11-25 09:44:04 UTC; 39min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 1013 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48640)
     Memory: 280.0K (peak: 520.0K)
        CPU: 9ms
     CGroup: /system.slice/system-getty.slice/getty@tty1.service
             └─1013 /sbin/agetty -o "-p -- \\u" --noclear - linux

Nov 25 09:44:04 np0005534776.novalocal systemd[1]: Started Getty on tty1.

● gssproxy.service - GSSAPI Proxy Daemon
     Loaded: loaded (/usr/lib/systemd/system/gssproxy.service; disabled; preset: disabled)
     Active: active (running) since Tue 2025-11-25 09:43:59 UTC; 39min ago
   Main PID: 885 (gssproxy)
         IO: 0B read, 0B written
      Tasks: 6 (limit: 48640)
     Memory: 1.8M (peak: 3.5M)
        CPU: 17ms
     CGroup: /system.slice/gssproxy.service
             └─885 /usr/sbin/gssproxy -D

Nov 25 09:43:59 np0005534776.novalocal systemd[1]: Starting GSSAPI Proxy Daemon...
Nov 25 09:43:59 np0005534776.novalocal systemd[1]: Started GSSAPI Proxy Daemon.

○ import-state.service - Import network configuration from initramfs
     Loaded: loaded (/usr/lib/systemd/system/import-state.service; enabled; preset: enabled)
     Active: inactive (dead)

○ initrd-cleanup.service - Cleaning Up and Shutting Down Daemons
     Loaded: loaded (/usr/lib/systemd/system/initrd-cleanup.service; static)
     Active: inactive (dead) since Tue 2025-11-25 09:43:53 UTC; 39min ago
   Main PID: 618 (code=exited, status=0/SUCCESS)
        CPU: 14ms

Nov 25 09:43:53 localhost systemd[1]: Starting Cleaning Up and Shutting Down Daemons...
Nov 25 09:43:53 localhost systemd[1]: initrd-cleanup.service: Deactivated successfully.
Nov 25 09:43:53 localhost systemd[1]: Finished Cleaning Up and Shutting Down Daemons.

○ initrd-parse-etc.service - Mountpoints Configured in the Real Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-parse-etc.service; static)
     Active: inactive (dead) since Tue 2025-11-25 09:43:53 UTC; 39min ago
   Main PID: 570 (code=exited, status=0/SUCCESS)
        CPU: 16ms

Nov 25 09:43:53 localhost systemd[1]: Starting Mountpoints Configured in the Real Root...
Nov 25 09:43:53 localhost systemd[1]: initrd-parse-etc.service: Deactivated successfully.
Nov 25 09:43:53 localhost systemd[1]: Finished Mountpoints Configured in the Real Root.

○ initrd-switch-root.service - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.service; static)
     Active: inactive (dead) since Tue 2025-11-25 09:43:54 UTC; 39min ago
   Main PID: 622 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Nov 25 09:43:53 localhost systemd[1]: Starting Switch Root...

○ initrd-udevadm-cleanup-db.service - Cleanup udev Database
     Loaded: loaded (/usr/lib/systemd/system/initrd-udevadm-cleanup-db.service; static)
     Active: inactive (dead) since Tue 2025-11-25 09:43:53 UTC; 39min ago
   Main PID: 621 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Nov 25 09:43:53 localhost systemd[1]: Starting Cleanup udev Database...
Nov 25 09:43:53 localhost systemd[1]: initrd-udevadm-cleanup-db.service: Deactivated successfully.
Nov 25 09:43:53 localhost systemd[1]: Finished Cleanup udev Database.

○ ip6tables.service - IPv6 firewall with ip6tables
     Loaded: loaded (/usr/lib/systemd/system/ip6tables.service; disabled; preset: disabled)
     Active: inactive (dead)

○ iptables.service - IPv4 firewall with iptables
     Loaded: loaded (/usr/lib/systemd/system/iptables.service; disabled; preset: disabled)
     Active: inactive (dead) since Tue 2025-11-25 10:16:32 UTC; 7min ago
   Duration: 32min 34.963s
   Main PID: 815 (code=exited, status=0/SUCCESS)
        CPU: 95ms

Nov 25 09:43:56 localhost systemd[1]: Starting IPv4 firewall with iptables...
Nov 25 09:43:57 localhost iptables.init[815]: iptables: Applying firewall rules: [  OK  ]
Nov 25 09:43:57 localhost systemd[1]: Finished IPv4 firewall with iptables.
Nov 25 10:16:31 compute-0 systemd[1]: Stopping IPv4 firewall with iptables...
Nov 25 10:16:32 compute-0 iptables.init[62863]: iptables: Setting chains to policy ACCEPT: raw mangle filter nat [  OK  ]
Nov 25 10:16:32 compute-0 iptables.init[62863]: iptables: Flushing firewall rules: [  OK  ]
Nov 25 10:16:32 compute-0 systemd[1]: iptables.service: Deactivated successfully.
Nov 25 10:16:32 compute-0 systemd[1]: Stopped IPv4 firewall with iptables.

● irqbalance.service - irqbalance daemon
     Loaded: loaded (/usr/lib/systemd/system/irqbalance.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-11-25 09:43:56 UTC; 39min ago
       Docs: man:irqbalance(1)
             https://github.com/Irqbalance/irqbalance
   Main PID: 816 (irqbalance)
         IO: 0B read, 0B written
      Tasks: 2 (limit: 48640)
     Memory: 1.1M (peak: 1.4M)
        CPU: 249ms
     CGroup: /system.slice/irqbalance.service
             └─816 /usr/sbin/irqbalance

Nov 25 09:44:07 np0005534776.novalocal irqbalance[816]: Cannot change IRQ 32 affinity: Operation not permitted
Nov 25 09:44:07 np0005534776.novalocal irqbalance[816]: IRQ 32 affinity is now unmanaged
Nov 25 09:44:07 np0005534776.novalocal irqbalance[816]: Cannot change IRQ 30 affinity: Operation not permitted
Nov 25 09:44:07 np0005534776.novalocal irqbalance[816]: IRQ 30 affinity is now unmanaged
Nov 25 09:44:07 np0005534776.novalocal irqbalance[816]: Cannot change IRQ 29 affinity: Operation not permitted
Nov 25 09:44:07 np0005534776.novalocal irqbalance[816]: IRQ 29 affinity is now unmanaged
Nov 25 09:54:47 compute-0 irqbalance[816]: Cannot change IRQ 27 affinity: Operation not permitted
Nov 25 09:54:47 compute-0 irqbalance[816]: IRQ 27 affinity is now unmanaged
Nov 25 10:15:07 compute-0 irqbalance[816]: Cannot change IRQ 26 affinity: Operation not permitted
Nov 25 10:15:07 compute-0 irqbalance[816]: IRQ 26 affinity is now unmanaged

● kdump.service - Crash recovery kernel arming
     Loaded: loaded (/usr/lib/systemd/system/kdump.service; enabled; preset: enabled)
     Active: active (exited) since Tue 2025-11-25 09:44:18 UTC; 39min ago
   Main PID: 1011 (code=exited, status=0/SUCCESS)
        CPU: 17.874s

Nov 25 09:44:17 np0005534776.novalocal dracut[1290]: Linked:         0 files
Nov 25 09:44:17 np0005534776.novalocal dracut[1290]: Compared:       0 xattrs
Nov 25 09:44:17 np0005534776.novalocal dracut[1290]: Compared:       0 files
Nov 25 09:44:17 np0005534776.novalocal dracut[1290]: Saved:          0 B
Nov 25 09:44:17 np0005534776.novalocal dracut[1290]: Duration:       0.000517 seconds
Nov 25 09:44:17 np0005534776.novalocal dracut[1290]: *** Hardlinking files done ***
Nov 25 09:44:17 np0005534776.novalocal dracut[1290]: *** Creating initramfs image file '/boot/initramfs-5.14.0-642.el9.x86_64kdump.img' done ***
Nov 25 09:44:18 np0005534776.novalocal kdumpctl[1017]: kdump: kexec: loaded kdump kernel
Nov 25 09:44:18 np0005534776.novalocal kdumpctl[1017]: kdump: Starting kdump: [OK]
Nov 25 09:44:18 np0005534776.novalocal systemd[1]: Finished Crash recovery kernel arming.

● kmod-static-nodes.service - Create List of Static Device Nodes
     Loaded: loaded (/usr/lib/systemd/system/kmod-static-nodes.service; static)
     Active: active (exited) since Tue 2025-11-25 09:43:55 UTC; 39min ago
   Main PID: 672 (code=exited, status=0/SUCCESS)
        CPU: 7ms

Nov 25 09:43:55 localhost systemd[1]: Finished Create List of Static Device Nodes.

● ldconfig.service - Rebuild Dynamic Linker Cache
     Loaded: loaded (/usr/lib/systemd/system/ldconfig.service; static)
     Active: active (exited) since Tue 2025-11-25 09:43:56 UTC; 39min ago
       Docs: man:ldconfig(8)
   Main PID: 693 (code=exited, status=0/SUCCESS)
        CPU: 85ms

Nov 25 09:43:55 localhost systemd[1]: Starting Rebuild Dynamic Linker Cache...
Nov 25 09:43:56 localhost systemd[1]: Finished Rebuild Dynamic Linker Cache.

○ loadmodules.service - Load legacy module configuration
     Loaded: loaded (/usr/lib/systemd/system/loadmodules.service; enabled; preset: enabled)
     Active: inactive (dead)

○ logrotate.service - Rotate log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.service; static)
     Active: inactive (dead)
TriggeredBy: ● logrotate.timer
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

○ lvm2-lvmpolld.service - LVM2 poll daemon
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.service; static)
     Active: inactive (dead)
TriggeredBy: ● lvm2-lvmpolld.socket
       Docs: man:lvmpolld(8)

● lvm2-monitor.service - Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling
     Loaded: loaded (/usr/lib/systemd/system/lvm2-monitor.service; enabled; preset: enabled)
     Active: active (exited) since Tue 2025-11-25 10:11:53 UTC; 11min ago
       Docs: man:dmeventd(8)
             man:lvcreate(8)
             man:lvchange(8)
             man:vgchange(8)
   Main PID: 34051 (code=exited, status=0/SUCCESS)
        CPU: 31ms

Nov 25 10:11:53 compute-0 systemd[1]: Starting Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling...
Nov 25 10:11:53 compute-0 systemd[1]: Finished Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling.

○ microcode.service - Load CPU microcode update
     Loaded: loaded (/usr/lib/systemd/system/microcode.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-11-25 09:43:56 UTC; 39min ago

Nov 25 09:43:56 localhost systemd[1]: Load CPU microcode update was skipped because of an unmet condition check (ConditionPathExists=/sys/devices/system/cpu/microcode/reload).

○ modprobe@configfs.service - Load Kernel Module configfs
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Tue 2025-11-25 09:43:56 UTC; 39min ago
       Docs: man:modprobe(8)
   Main PID: 739 (code=exited, status=0/SUCCESS)
        CPU: 5ms

Nov 25 09:43:56 localhost systemd[1]: Starting Load Kernel Module configfs...
Nov 25 09:43:56 localhost systemd[1]: modprobe@configfs.service: Deactivated successfully.
Nov 25 09:43:56 localhost systemd[1]: Finished Load Kernel Module configfs.

○ modprobe@drm.service - Load Kernel Module drm
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:modprobe(8)
   Main PID: 674 (code=exited, status=0/SUCCESS)
        CPU: 86ms

Nov 25 09:43:55 localhost systemd[1]: modprobe@drm.service: Deactivated successfully.
Nov 25 09:43:55 localhost systemd[1]: Finished Load Kernel Module drm.

○ modprobe@efi_pstore.service - Load Kernel Module efi_pstore
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:modprobe(8)
   Main PID: 675 (code=exited, status=0/SUCCESS)
        CPU: 4ms

Nov 25 09:43:55 localhost systemd[1]: modprobe@efi_pstore.service: Deactivated successfully.
Nov 25 09:43:55 localhost systemd[1]: Finished Load Kernel Module efi_pstore.

○ modprobe@fuse.service - Load Kernel Module fuse
     Loaded: loaded (/usr/lib/systemd/system/modprobe@.service; static)
     Active: inactive (dead) since Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:modprobe(8)
   Main PID: 676 (code=exited, status=0/SUCCESS)
        CPU: 38ms

Nov 25 09:43:55 localhost systemd[1]: modprobe@fuse.service: Deactivated successfully.
Nov 25 09:43:55 localhost systemd[1]: Finished Load Kernel Module fuse.

○ netns-placeholder.service - Create netns directory
     Loaded: loaded (/etc/systemd/system/netns-placeholder.service; enabled; preset: enabled)
     Active: inactive (dead) since Tue 2025-11-25 10:16:27 UTC; 7min ago
   Main PID: 62391 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Nov 25 10:16:27 compute-0 systemd[1]: Starting Create netns directory...
Nov 25 10:16:27 compute-0 systemd[1]: netns-placeholder.service: Deactivated successfully.
Nov 25 10:16:27 compute-0 systemd[1]: Finished Create netns directory.

○ network.service - LSB: Bring up/down networking
     Loaded: loaded (/etc/rc.d/init.d/network; generated)
     Active: inactive (dead)
       Docs: man:systemd-sysv-generator(8)

● NetworkManager-wait-online.service - Network Manager Wait Online
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager-wait-online.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-11-25 10:14:39 UTC; 8min ago
       Docs: man:NetworkManager-wait-online.service(8)
   Main PID: 49063 (code=exited, status=0/SUCCESS)
        CPU: 28ms

Nov 25 10:14:39 compute-0 systemd[1]: Starting Network Manager Wait Online...
Nov 25 10:14:39 compute-0 systemd[1]: Finished Network Manager Wait Online.

● NetworkManager.service - Network Manager
     Loaded: loaded (/usr/lib/systemd/system/NetworkManager.service; enabled; preset: enabled)
    Drop-In: /usr/lib/systemd/system/NetworkManager.service.d
             └─NetworkManager-ovs.conf
     Active: active (running) since Tue 2025-11-25 10:14:39 UTC; 8min ago
       Docs: man:NetworkManager(8)
   Main PID: 49043 (NetworkManager)
         IO: 104.0K read, 306.5K written
      Tasks: 3 (limit: 48640)
     Memory: 5.1M (peak: 6.6M)
        CPU: 5.120s
     CGroup: /system.slice/NetworkManager.service
             └─49043 /usr/sbin/NetworkManager --no-daemon

Nov 25 10:15:00 compute-0 NetworkManager[49043]: <info>  [1764065700.6248] audit: op="networking-control" arg="global-dns-configuration" pid=51823 uid=0 result="success"
Nov 25 10:15:00 compute-0 NetworkManager[49043]: <info>  [1764065700.6277] config: signal: SET_VALUES,values,values-intern,global-dns-config (/etc/NetworkManager/NetworkManager.conf, /run/NetworkManager/conf.d/15-carrier-timeout.conf)
Nov 25 10:15:00 compute-0 NetworkManager[49043]: <info>  [1764065700.6310] audit: op="networking-control" arg="global-dns-configuration" pid=51823 uid=0 result="success"
Nov 25 10:15:00 compute-0 NetworkManager[49043]: <info>  [1764065700.6347] audit: op="checkpoint-adjust-rollback-timeout" arg="/org/freedesktop/NetworkManager/Checkpoint/2" pid=51823 uid=0 result="success"
Nov 25 10:15:00 compute-0 NetworkManager[49043]: <info>  [1764065700.7833] checkpoint[0x564d2bc16a20]: destroy /org/freedesktop/NetworkManager/Checkpoint/2
Nov 25 10:15:00 compute-0 NetworkManager[49043]: <info>  [1764065700.7837] audit: op="checkpoint-destroy" arg="/org/freedesktop/NetworkManager/Checkpoint/2" pid=51823 uid=0 result="success"
Nov 25 10:15:07 compute-0 systemd[1]: Reloading Network Manager...
Nov 25 10:15:07 compute-0 NetworkManager[49043]: <info>  [1764065707.5599] audit: op="reload" arg="0" pid=53095 uid=0 result="success"
Nov 25 10:15:07 compute-0 NetworkManager[49043]: <info>  [1764065707.5605] config: signal: SIGHUP,config-files,values,values-user,no-auto-default (/etc/NetworkManager/NetworkManager.conf, /usr/lib/NetworkManager/conf.d/00-server.conf, /run/NetworkManager/conf.d/15-carrier-timeout.conf, /var/lib/NetworkManager/NetworkManager-intern.conf)
Unit ntpd.service could not be found.
Unit ntpdate.service could not be found.
Nov 25 10:15:07 compute-0 systemd[1]: Reloaded Network Manager.

○ nfs-idmapd.service - NFSv4 ID-name mapping service
     Loaded: loaded (/usr/lib/systemd/system/nfs-idmapd.service; static)
     Active: inactive (dead)
       Docs: man:idmapd(8)

○ nfs-mountd.service - NFS Mount Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfs-mountd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.mountd(8)

○ nfs-server.service - NFS server and services
     Loaded: loaded (/usr/lib/systemd/system/nfs-server.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:rpc.nfsd(8)
             man:exportfs(8)

○ nfs-utils.service - NFS server and client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-utils.service; static)
     Active: inactive (dead)

○ nfsdcld.service - NFSv4 Client Tracking Daemon
     Loaded: loaded (/usr/lib/systemd/system/nfsdcld.service; static)
     Active: inactive (dead)
       Docs: man:nfsdcld(8)

● nftables.service - Netfilter Tables
     Loaded: loaded (/usr/lib/systemd/system/nftables.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-11-25 10:16:34 UTC; 6min ago
       Docs: man:nft(8)
   Main PID: 63255 (code=exited, status=0/SUCCESS)
        CPU: 39ms

Nov 25 10:16:34 compute-0 systemd[1]: Starting Netfilter Tables...
Nov 25 10:16:34 compute-0 systemd[1]: Finished Netfilter Tables.

● nis-domainname.service - Read and set NIS domainname from /etc/sysconfig/network
     Loaded: loaded (/usr/lib/systemd/system/nis-domainname.service; enabled; preset: enabled)
     Active: active (exited) since Tue 2025-11-25 09:43:55 UTC; 39min ago
   Main PID: 677 (code=exited, status=0/SUCCESS)
        CPU: 5ms

Nov 25 09:43:55 localhost systemd[1]: Finished Read and set NIS domainname from /etc/sysconfig/network.

● openvswitch.service - Open vSwitch
     Loaded: loaded (/usr/lib/systemd/system/openvswitch.service; enabled; preset: disabled)
     Active: active (exited) since Tue 2025-11-25 10:14:25 UTC; 9min ago
   Main PID: 47356 (code=exited, status=0/SUCCESS)
        CPU: 2ms

Nov 25 10:14:25 compute-0 systemd[1]: Starting Open vSwitch...
Nov 25 10:14:25 compute-0 systemd[1]: Finished Open vSwitch.

● ovs-delete-transient-ports.service - Open vSwitch Delete Transient Ports
     Loaded: loaded (/usr/lib/systemd/system/ovs-delete-transient-ports.service; static)
     Active: active (exited) since Tue 2025-11-25 10:14:25 UTC; 9min ago
   Main PID: 47293 (code=exited, status=0/SUCCESS)
        CPU: 27ms

Nov 25 10:14:25 compute-0 systemd[1]: Starting Open vSwitch Delete Transient Ports...
Nov 25 10:14:25 compute-0 systemd[1]: Finished Open vSwitch Delete Transient Ports.

● ovs-vswitchd.service - Open vSwitch Forwarding Unit
     Loaded: loaded (/usr/lib/systemd/system/ovs-vswitchd.service; static)
     Active: active (running) since Tue 2025-11-25 10:14:25 UTC; 9min ago
   Main PID: 47347 (ovs-vswitchd)
         IO: 3.4M read, 16.0K written
      Tasks: 13 (limit: 48640)
     Memory: 242.7M (peak: 248.7M)
        CPU: 1.237s
     CGroup: /system.slice/ovs-vswitchd.service
             └─47347 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach

Nov 25 10:14:25 compute-0 systemd[1]: Starting Open vSwitch Forwarding Unit...
Nov 25 10:14:25 compute-0 ovs-ctl[47336]: Inserting openvswitch module [  OK  ]
Nov 25 10:14:25 compute-0 ovs-ctl[47305]: Starting ovs-vswitchd [  OK  ]
Nov 25 10:14:25 compute-0 ovs-vsctl[47355]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0
Nov 25 10:14:25 compute-0 ovs-ctl[47305]: Enabling remote OVSDB managers [  OK  ]
Nov 25 10:14:25 compute-0 systemd[1]: Started Open vSwitch Forwarding Unit.

● ovsdb-server.service - Open vSwitch Database Unit
     Loaded: loaded (/usr/lib/systemd/system/ovsdb-server.service; static)
Unit pacemaker.service could not be found.
Unit plymouth-quit-wait.service could not be found.
Unit plymouth-start.service could not be found.
Unit power-profiles-daemon.service could not be found.
     Active: active (running) since Tue 2025-11-25 10:14:25 UTC; 9min ago
   Main PID: 47265 (ovsdb-server)
         IO: 1.2M read, 54.5K written
      Tasks: 1 (limit: 48640)
     Memory: 4.3M (peak: 38.8M)
        CPU: 1.146s
     CGroup: /system.slice/ovsdb-server.service
             └─47265 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach

Nov 25 10:14:25 compute-0 chown[47211]: /usr/bin/chown: cannot access '/run/openvswitch': No such file or directory
Nov 25 10:14:25 compute-0 ovs-ctl[47216]: /etc/openvswitch/conf.db does not exist ... (warning).
Nov 25 10:14:25 compute-0 ovs-ctl[47216]: Creating empty database /etc/openvswitch/conf.db [  OK  ]
Nov 25 10:14:25 compute-0 ovs-ctl[47216]: Starting ovsdb-server [  OK  ]
Nov 25 10:14:25 compute-0 ovs-vsctl[47266]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait -- init -- set Open_vSwitch . db-version=8.5.1
Nov 25 10:14:25 compute-0 ovs-vsctl[47286]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait set Open_vSwitch . ovs-version=3.3.5-115.el9s "external-ids:system-id=\"95c81c1b-c02a-4bed-bd5c-c6cbddc229dd\"" "external-ids:rundir=\"/var/run/openvswitch\"" "system-type=\"centos\"" "system-version=\"9\""
Nov 25 10:14:25 compute-0 ovs-ctl[47216]: Configuring Open vSwitch system IDs [  OK  ]
Nov 25 10:14:25 compute-0 ovs-ctl[47216]: Enabling remote OVSDB managers [  OK  ]
Nov 25 10:14:25 compute-0 systemd[1]: Started Open vSwitch Database Unit.
Nov 25 10:14:25 compute-0 ovs-vsctl[47292]: ovs|00001|vsctl|INFO|Called as ovs-vsctl --no-wait add Open_vSwitch . external-ids hostname=compute-0

● polkit.service - Authorization Manager
     Loaded: loaded (/usr/lib/systemd/system/polkit.service; static)
     Active: active (running) since Tue 2025-11-25 10:13:39 UTC; 9min ago
       Docs: man:polkit(8)
   Main PID: 43525 (polkitd)
         IO: 18.6M read, 0B written
      Tasks: 12 (limit: 48640)
     Memory: 24.3M (peak: 26.9M)
        CPU: 606ms
     CGroup: /system.slice/polkit.service
             └─43525 /usr/lib/polkit-1/polkitd --no-debug

Nov 25 10:13:39 compute-0 systemd[1]: Starting Authorization Manager...
Nov 25 10:13:39 compute-0 polkitd[43525]: Started polkitd version 0.117
Nov 25 10:13:39 compute-0 polkitd[43525]: Loading rules from directory /etc/polkit-1/rules.d
Nov 25 10:13:39 compute-0 polkitd[43525]: Loading rules from directory /usr/share/polkit-1/rules.d
Nov 25 10:13:39 compute-0 polkitd[43525]: Finished loading, compiling and executing 2 rules
Nov 25 10:13:39 compute-0 systemd[1]: Started Authorization Manager.
Nov 25 10:13:39 compute-0 polkitd[43525]: Acquired the name org.freedesktop.PolicyKit1 on the system bus

○ rc-local.service - /etc/rc.d/rc.local Compatibility
     Loaded: loaded (/usr/lib/systemd/system/rc-local.service; static)
     Active: inactive (dead)
       Docs: man:systemd-rc-local-generator(8)

○ rescue.service - Rescue Shell
     Loaded: loaded (/usr/lib/systemd/system/rescue.service; static)
     Active: inactive (dead)
       Docs: man:sulogin(8)

○ rpc-gssd.service - RPC security service for NFS client and server
     Loaded: loaded (/usr/lib/systemd/system/rpc-gssd.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-11-25 09:43:59 UTC; 39min ago
       Docs: man:rpc.gssd(8)

Nov 25 09:43:59 np0005534776.novalocal systemd[1]: RPC security service for NFS client and server was skipped because of an unmet condition check (ConditionPathExists=/etc/krb5.keytab).

● rpc-statd-notify.service - Notify NFS peers of a restart
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd-notify.service; static)
     Active: active (exited) since Tue 2025-11-25 09:44:04 UTC; 39min ago
       Docs: man:sm-notify(8)
             man:rpc.statd(8)
Unit rpc-svcgssd.service could not be found.
        CPU: 8ms

Nov 25 09:44:04 np0005534776.novalocal systemd[1]: Starting Notify NFS peers of a restart...
Nov 25 09:44:04 np0005534776.novalocal sm-notify[1007]: Version 2.5.4 starting
Nov 25 09:44:04 np0005534776.novalocal systemd[1]: Started Notify NFS peers of a restart.

○ rpc-statd.service - NFS status monitor for NFSv2/3 locking.
     Loaded: loaded (/usr/lib/systemd/system/rpc-statd.service; static)
     Active: inactive (dead)
       Docs: man:rpc.statd(8)

● rpcbind.service - RPC Bind
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-11-25 09:43:55 UTC; 39min ago
TriggeredBy: ● rpcbind.socket
       Docs: man:rpcbind(8)
   Main PID: 698 (rpcbind)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48640)
     Memory: 3.2M (peak: 3.5M)
        CPU: 30ms
     CGroup: /system.slice/rpcbind.service
             └─698 /usr/bin/rpcbind -w -f

Nov 25 09:43:55 localhost systemd[1]: Starting RPC Bind...
Nov 25 09:43:55 localhost systemd[1]: Started RPC Bind.

● rsyslog.service - System Logging Service
     Loaded: loaded (/usr/lib/systemd/system/rsyslog.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-11-25 09:44:04 UTC; 39min ago
       Docs: man:rsyslogd(8)
             https://www.rsyslog.com/doc/
   Main PID: 1008 (rsyslogd)
         IO: 0B read, 5.1M written
      Tasks: 3 (limit: 48640)
     Memory: 7.3M (peak: 8.0M)
        CPU: 2.416s
     CGroup: /system.slice/rsyslog.service
             └─1008 /usr/sbin/rsyslogd -n

Nov 25 09:44:04 np0005534776.novalocal systemd[1]: Starting System Logging Service...
Nov 25 09:44:04 np0005534776.novalocal rsyslogd[1008]: [origin software="rsyslogd" swVersion="8.2510.0-2.el9" x-pid="1008" x-info="https://www.rsyslog.com"] start
Nov 25 09:44:04 np0005534776.novalocal rsyslogd[1008]: imjournal: No statefile exists, /var/lib/rsyslog/imjournal.state will be created (ignore if this is first run): No such file or directory [v8.2510.0-2.el9 try https://www.rsyslog.com/e/2040 ]
Nov 25 09:44:04 np0005534776.novalocal systemd[1]: Started System Logging Service.
Nov 25 09:44:04 np0005534776.novalocal rsyslogd[1008]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Nov 25 10:13:13 compute-0 rsyslogd[1008]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Nov 25 10:17:30 compute-0 rsyslogd[1008]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Nov 25 10:17:30 compute-0 rsyslogd[1008]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]
Nov 25 10:21:24 compute-0 rsyslogd[1008]: message too long (8192) with configured size 8096, begin of message is: [{"container_id": "b14c4d0f9a09", "container_image_digests": ["quay.io/ceph/ceph [v8.2510.0-2.el9 try https://www.rsyslog.com/e/2445 ]
Nov 25 10:21:27 compute-0 rsyslogd[1008]: imjournal: journal files changed, reloading...  [v8.2510.0-2.el9 try https://www.rsyslog.com/e/0 ]

○ selinux-autorelabel-mark.service - Mark the need to relabel after reboot
     Loaded: loaded (/usr/lib/systemd/system/selinux-autorelabel-mark.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-11-25 09:43:55 UTC; 39min ago

Nov 25 09:43:55 localhost systemd[1]: Mark the need to relabel after reboot was skipped because of an unmet condition check (ConditionSecurity=!selinux).

● serial-getty@ttyS0.service - Serial Getty on ttyS0
     Loaded: loaded (/usr/lib/systemd/system/serial-getty@.service; enabled-runtime; preset: disabled)
     Active: active (running) since Tue 2025-11-25 09:44:09 UTC; 39min ago
       Docs: man:agetty(8)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html
   Main PID: 2566 (agetty)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48640)
     Memory: 240.0K (peak: 728.0K)
        CPU: 6ms
Unit sntp.service could not be found.
Unit sshd-keygen.service could not be found.
     CGroup: /system.slice/system-serial\x2dgetty.slice/serial-getty@ttyS0.service
             └─2566 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

Nov 25 09:44:09 np0005534776.novalocal systemd[1]: Started Serial Getty on ttyS0.

○ sshd-keygen@ecdsa.service - OpenSSH ecdsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-11-25 09:43:56 UTC; 39min ago

Nov 25 09:43:56 localhost systemd[1]: OpenSSH ecdsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@ed25519.service - OpenSSH ed25519 Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-11-25 09:43:56 UTC; 39min ago

Nov 25 09:43:56 localhost systemd[1]: OpenSSH ed25519 Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

○ sshd-keygen@rsa.service - OpenSSH rsa Server Key Generation
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen@.service; disabled; preset: disabled)
    Drop-In: /usr/lib/systemd/system/sshd-keygen@.service.d
             └─disable-sshd-keygen-if-cloud-init-active.conf
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-11-25 09:43:56 UTC; 39min ago

Nov 25 09:43:56 localhost systemd[1]: OpenSSH rsa Server Key Generation was skipped because of an unmet condition check (ConditionPathExists=!/run/systemd/generator.early/multi-user.target.wants/cloud-init.target).

● sshd.service - OpenSSH server daemon
     Loaded: loaded (/usr/lib/systemd/system/sshd.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-11-25 09:44:04 UTC; 39min ago
       Docs: man:sshd(8)
             man:sshd_config(5)
   Main PID: 1009 (sshd)
         IO: 32.0K read, 164.0K written
      Tasks: 1 (limit: 48640)
     Memory: 34.3M (peak: 35.6M)
        CPU: 4.009s
     CGroup: /system.slice/sshd.service
             └─1009 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"

Nov 25 10:22:40 compute-0 sshd-session[104634]: Invalid user kingbase from 36.95.221.140 port 40944
Nov 25 10:22:40 compute-0 sshd-session[104634]: Received disconnect from 36.95.221.140 port 40944:11: Bye Bye [preauth]
Nov 25 10:22:40 compute-0 sshd-session[104634]: Disconnected from invalid user kingbase 36.95.221.140 port 40944 [preauth]
Nov 25 10:22:49 compute-0 sshd-session[105521]: Invalid user root2 from 20.40.73.192 port 53248
Nov 25 10:22:49 compute-0 sshd-session[105521]: Received disconnect from 20.40.73.192 port 53248:11: Bye Bye [preauth]
Nov 25 10:22:49 compute-0 sshd-session[105521]: Disconnected from invalid user root2 20.40.73.192 port 53248 [preauth]
Nov 25 10:23:01 compute-0 sshd-session[105523]: Received disconnect from 200.8.228.57 port 56966:11: Bye Bye [preauth]
Nov 25 10:23:01 compute-0 sshd-session[105523]: Disconnected from authenticating user root 200.8.228.57 port 56966 [preauth]
Nov 25 10:23:08 compute-0 sshd-session[105525]: Accepted publickey for zuul from 192.168.122.10 port 57774 ssh2: ECDSA SHA256:oovU9KaTaKTik/Ga7gdISZ8d5PJoBUjNphenPJURrck
Nov 25 10:23:08 compute-0 sshd-session[105525]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)

○ sssd-kcm.service - SSSD Kerberos Cache Manager
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.service; indirect; preset: disabled)
     Active: inactive (dead)
TriggeredBy: ● sssd-kcm.socket
       Docs: man:sssd-kcm(5)

Unit syslog.service could not be found.
○ sssd.service - System Security Services Daemon
     Loaded: loaded (/usr/lib/systemd/system/sssd.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-11-25 09:43:56 UTC; 39min ago

Nov 25 09:43:56 localhost systemd[1]: System Security Services Daemon was skipped because no trigger condition checks were met.

○ sysstat-collect.service - system activity accounting tool
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-collect.timer
       Docs: man:sa1(8)

○ sysstat-summary.service - Generate a daily summary of process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.service; static)
     Active: inactive (dead)
TriggeredBy: ○ sysstat-summary.timer
       Docs: man:sa2(8)

○ sysstat.service - Resets System Activity Logs
     Loaded: loaded (/usr/lib/systemd/system/sysstat.service; enabled; preset: enabled)
     Active: inactive (dead)

○ systemd-ask-password-console.service - Dispatch Password Requests to Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-console.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-console.path
       Docs: man:systemd-ask-password-console.service(8)

○ systemd-ask-password-wall.service - Forward Password Requests to Wall
     Loaded: loaded (/usr/lib/systemd/system/systemd-ask-password-wall.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-ask-password-wall.path
       Docs: man:systemd-ask-password-wall.service(8)

○ systemd-binfmt.service - Set Up Additional Binary Formats
     Loaded: loaded (/usr/lib/systemd/system/systemd-binfmt.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:systemd-binfmt.service(8)
             man:binfmt.d(5)
             https://docs.kernel.org/admin-guide/binfmt-misc.html
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems

Nov 25 09:43:55 localhost systemd[1]: Set Up Additional Binary Formats was skipped because no trigger condition checks were met.

○ systemd-boot-random-seed.service - Update Boot Loader Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-random-seed.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:systemd-boot-random-seed.service(8)
             man:random(4)

Nov 25 09:43:55 localhost systemd[1]: Update Boot Loader Random Seed was skipped because no trigger condition checks were met.

● systemd-boot-update.service - Automatic Boot Loader Update
     Loaded: loaded (/usr/lib/systemd/system/systemd-boot-update.service; enabled; preset: enabled)
     Active: active (exited) since Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:bootctl(1)
   Main PID: 694 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Nov 25 09:43:55 localhost systemd[1]: Starting Automatic Boot Loader Update...
Nov 25 09:43:55 localhost bootctl[694]: Couldn't find EFI system partition, skipping.
Nov 25 09:43:55 localhost systemd[1]: Finished Automatic Boot Loader Update.

○ systemd-firstboot.service - First Boot Wizard
     Loaded: loaded (/usr/lib/systemd/system/systemd-firstboot.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:systemd-firstboot(1)

Nov 25 09:43:55 localhost systemd[1]: First Boot Wizard was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ systemd-fsck-root.service - File System Check on Root Device
     Loaded: loaded (/usr/lib/systemd/system/systemd-fsck-root.service; static)
     Active: inactive (dead) since Tue 2025-11-25 09:43:55 UTC; 39min ago
   Duration: 2.771s
       Docs: man:systemd-fsck-root.service(8)
   Main PID: 553 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Nov 25 09:43:52 localhost systemd[1]: Starting File System Check on /dev/disk/by-uuid/47e3724e-7a1b-439a-9543-b98c9a290709...
Nov 25 09:43:52 localhost systemd-fsck[555]: /usr/sbin/fsck.xfs: XFS file system.
Nov 25 09:43:52 localhost systemd[1]: Finished File System Check on /dev/disk/by-uuid/47e3724e-7a1b-439a-9543-b98c9a290709.

● systemd-hostnamed.service - Hostname Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-hostnamed.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-hostnamed.service.d
             └─disable-privatedevices.conf
     Active: active (running) since Tue 2025-11-25 10:23:19 UTC; 13s ago
       Docs: man:systemd-hostnamed.service(8)
             man:hostname(5)
             man:machine-info(5)
             man:org.freedesktop.resolve1(5)
   Main PID: 106828 (systemd-hostnam)
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48640)
     Memory: 2.7M (peak: 3.8M)
        CPU: 98ms
     CGroup: /system.slice/systemd-hostnamed.service
             └─106828 /usr/lib/systemd/systemd-hostnamed

Nov 25 10:23:19 compute-0 systemd[1]: Starting Hostname Service...
Nov 25 10:23:19 compute-0 systemd[1]: Started Hostname Service.

● systemd-hwdb-update.service - Rebuild Hardware Database
     Loaded: loaded (/usr/lib/systemd/system/systemd-hwdb-update.service; static)
     Active: active (exited) since Tue 2025-11-25 09:43:56 UTC; 39min ago
       Docs: man:hwdb(7)
             man:systemd-hwdb(8)
   Main PID: 686 (code=exited, status=0/SUCCESS)
        CPU: 584ms

Nov 25 09:43:55 localhost systemd[1]: Starting Rebuild Hardware Database...
Nov 25 09:43:56 localhost systemd[1]: Finished Rebuild Hardware Database.

○ systemd-initctl.service - initctl Compatibility Daemon
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-initctl.socket
       Docs: man:systemd-initctl.service(8)

● systemd-journal-catalog-update.service - Rebuild Journal Catalog
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-catalog-update.service; static)
     Active: active (exited) since Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 699 (code=exited, status=0/SUCCESS)
        CPU: 18ms

Nov 25 09:43:55 localhost systemd[1]: Starting Rebuild Journal Catalog...
Nov 25 09:43:55 localhost systemd[1]: Finished Rebuild Journal Catalog.

● systemd-journal-flush.service - Flush Journal to Persistent Storage
     Loaded: loaded (/usr/lib/systemd/system/systemd-journal-flush.service; static)
     Active: active (exited) since Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 687 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Nov 25 09:43:55 localhost systemd[1]: Starting Flush Journal to Persistent Storage...
Nov 25 09:43:55 localhost systemd[1]: Finished Flush Journal to Persistent Storage.

● systemd-journald.service - Journal Service
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.service; static)
     Active: active (running) since Tue 2025-11-25 09:43:55 UTC; 39min ago
TriggeredBy: ● systemd-journald-dev-log.socket
             ● systemd-journald.socket
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
   Main PID: 678 (systemd-journal)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48640)
     Memory: 24.4M (peak: 31.2M)
        CPU: 3.210s
     CGroup: /system.slice/systemd-journald.service
             └─678 /usr/lib/systemd/systemd-journald

Nov 25 09:43:55 localhost systemd-journald[678]: Journal started
Nov 25 09:43:55 localhost systemd-journald[678]: Runtime Journal (/run/log/journal/fee38d0f94bf6f4b17ec77ba536bd6ab) is 8.0M, max 153.6M, 145.6M free.
Nov 25 09:43:54 localhost systemd[1]: systemd-journald.service: Deactivated successfully.
Nov 25 09:43:55 localhost systemd-journald[678]: Runtime Journal (/run/log/journal/fee38d0f94bf6f4b17ec77ba536bd6ab) is 8.0M, max 153.6M, 145.6M free.
Unit systemd-networkd-wait-online.service could not be found.
Nov 25 09:43:55 localhost systemd-journald[678]: Received client request to flush runtime journal.

● systemd-logind.service - User Login Management
     Loaded: loaded (/usr/lib/systemd/system/systemd-logind.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-logind.service.d
             └─10-grub2-logind-service.conf
     Active: active (running) since Tue 2025-11-25 09:43:56 UTC; 39min ago
       Docs: man:sd-login(3)
             man:systemd-logind.service(8)
             man:logind.conf(5)
             man:org.freedesktop.login1(5)
   Main PID: 820 (systemd-logind)
     Status: "Processing requests..."
         IO: 0B read, 0B written
      Tasks: 1 (limit: 48640)
     Memory: 6.4M (peak: 8.1M)
        CPU: 1.321s
     CGroup: /system.slice/systemd-logind.service
             └─820 /usr/lib/systemd/systemd-logind

Nov 25 10:19:35 compute-0 systemd-logind[820]: New session 24 of user ceph-admin.
Nov 25 10:19:35 compute-0 systemd-logind[820]: New session 25 of user ceph-admin.
Nov 25 10:19:35 compute-0 systemd-logind[820]: New session 26 of user ceph-admin.
Nov 25 10:19:36 compute-0 systemd-logind[820]: New session 27 of user ceph-admin.
Nov 25 10:19:36 compute-0 systemd-logind[820]: New session 28 of user ceph-admin.
Nov 25 10:19:36 compute-0 systemd-logind[820]: New session 29 of user ceph-admin.
Nov 25 10:19:37 compute-0 systemd-logind[820]: New session 30 of user ceph-admin.
Nov 25 10:19:37 compute-0 systemd-logind[820]: New session 31 of user ceph-admin.
Nov 25 10:19:38 compute-0 systemd-logind[820]: New session 32 of user ceph-admin.
Nov 25 10:23:08 compute-0 systemd-logind[820]: New session 33 of user zuul.

○ systemd-machine-id-commit.service - Commit a transient machine-id on disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-machine-id-commit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:systemd-machine-id-commit.service(8)

Nov 25 09:43:55 localhost systemd[1]: Commit a transient machine-id on disk was skipped because of an unmet condition check (ConditionPathIsMountPoint=/etc/machine-id).

● systemd-modules-load.service - Load Kernel Modules
     Loaded: loaded (/usr/lib/systemd/system/systemd-modules-load.service; static)
     Active: active (exited) since Tue 2025-11-25 10:13:23 UTC; 10min ago
       Docs: man:systemd-modules-load.service(8)
             man:modules-load.d(5)
   Main PID: 38698 (code=exited, status=0/SUCCESS)
        CPU: 65ms

Nov 25 10:13:23 compute-0 systemd[1]: Starting Load Kernel Modules...
Nov 25 10:13:23 compute-0 systemd-modules-load[38698]: Inserted module 'br_netfilter'
Nov 25 10:13:23 compute-0 systemd[1]: Finished Load Kernel Modules.

● systemd-network-generator.service - Generate network units from Kernel command line
     Loaded: loaded (/usr/lib/systemd/system/systemd-network-generator.service; enabled; preset: enabled)
     Active: active (exited) since Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:systemd-network-generator.service(8)
   Main PID: 679 (code=exited, status=0/SUCCESS)
        CPU: 10ms

Nov 25 09:43:55 localhost systemd[1]: Finished Generate network units from Kernel command line.

○ systemd-pcrmachine.service - TPM2 PCR Machine ID Measurement
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrmachine.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:systemd-pcrmachine.service(8)

○ systemd-pcrphase-initrd.service - TPM2 PCR Barrier (initrd)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-initrd.service; static)
     Active: inactive (dead)
       Docs: man:systemd-pcrphase-initrd.service(8)

○ systemd-pcrphase-sysinit.service - TPM2 PCR Barrier (Initialization)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase-sysinit.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-11-25 09:43:56 UTC; 39min ago
       Docs: man:systemd-pcrphase-sysinit.service(8)

Unit systemd-timesyncd.service could not be found.
Nov 25 09:43:56 localhost systemd[1]: TPM2 PCR Barrier (Initialization) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pcrphase.service - TPM2 PCR Barrier (User)
     Loaded: loaded (/usr/lib/systemd/system/systemd-pcrphase.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-11-25 09:43:59 UTC; 39min ago
       Docs: man:systemd-pcrphase.service(8)

Nov 25 09:43:59 np0005534776.novalocal systemd[1]: TPM2 PCR Barrier (User) was skipped because of an unmet condition check (ConditionPathExists=/sys/firmware/efi/efivars/StubPcrKernelImage-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f).

○ systemd-pstore.service - Platform Persistent Storage Archival
     Loaded: loaded (/usr/lib/systemd/system/systemd-pstore.service; enabled; preset: enabled)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:systemd-pstore(8)

Nov 25 09:43:55 localhost systemd[1]: Platform Persistent Storage Archival was skipped because of an unmet condition check (ConditionDirectoryNotEmpty=/sys/fs/pstore).

● systemd-random-seed.service - Load/Save OS Random Seed
     Loaded: loaded (/usr/lib/systemd/system/systemd-random-seed.service; static)
     Active: active (exited) since Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:systemd-random-seed.service(8)
             man:random(4)
   Main PID: 688 (code=exited, status=0/SUCCESS)
        CPU: 12ms

Nov 25 09:43:55 localhost systemd[1]: Starting Load/Save OS Random Seed...
Nov 25 09:43:55 localhost systemd[1]: Finished Load/Save OS Random Seed.

● systemd-remount-fs.service - Remount Root and Kernel File Systems
     Loaded: loaded (/usr/lib/systemd/system/systemd-remount-fs.service; enabled-runtime; preset: disabled)
     Active: active (exited) since Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:systemd-remount-fs.service(8)
             https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems
   Main PID: 680 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Nov 25 09:43:55 localhost systemd[1]: Finished Remount Root and Kernel File Systems.

○ systemd-repart.service - Repartition Root Disk
     Loaded: loaded (/usr/lib/systemd/system/systemd-repart.service; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:systemd-repart.service(8)

○ systemd-rfkill.service - Load/Save RF Kill Switch Status
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.service; static)
     Active: inactive (dead)
TriggeredBy: ● systemd-rfkill.socket
       Docs: man:systemd-rfkill.service(8)

● systemd-sysctl.service - Apply Kernel Variables
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysctl.service; static)
     Active: active (exited) since Tue 2025-11-25 10:13:49 UTC; 9min ago
       Docs: man:systemd-sysctl.service(8)
             man:sysctl.d(5)
   Main PID: 45014 (code=exited, status=0/SUCCESS)
        CPU: 31ms

Nov 25 10:13:49 compute-0 systemd[1]: Starting Apply Kernel Variables...
Nov 25 10:13:49 compute-0 systemd[1]: Finished Apply Kernel Variables.

○ systemd-sysext.service - Merge System Extension Images into /usr/ and /opt/
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysext.service; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd-sysext.service(8)

● systemd-sysusers.service - Create System Users
     Loaded: loaded (/usr/lib/systemd/system/systemd-sysusers.service; static)
     Active: active (exited) since Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:sysusers.d(5)
             man:systemd-sysusers.service(8)
   Main PID: 689 (code=exited, status=0/SUCCESS)
        CPU: 20ms

Nov 25 09:43:55 localhost systemd[1]: Starting Create System Users...
Nov 25 09:43:55 localhost systemd[1]: Finished Create System Users.

○ systemd-tmpfiles-clean.service - Cleanup of Temporary Directories
Unit systemd-tmpfiles.service could not be found.
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.service; static)
     Active: inactive (dead) since Tue 2025-11-25 09:59:04 UTC; 24min ago
TriggeredBy: ● systemd-tmpfiles-clean.timer
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 29961 (code=exited, status=0/SUCCESS)
        CPU: 81ms

Nov 25 09:59:04 compute-0 systemd[1]: Starting Cleanup of Temporary Directories...
Nov 25 09:59:04 compute-0 systemd[1]: systemd-tmpfiles-clean.service: Deactivated successfully.
Nov 25 09:59:04 compute-0 systemd[1]: Finished Cleanup of Temporary Directories.

● systemd-tmpfiles-setup-dev.service - Create Static Device Nodes in /dev
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup-dev.service; static)
     Active: active (exited) since Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 691 (code=exited, status=0/SUCCESS)
        CPU: 37ms

Nov 25 09:43:55 localhost systemd[1]: Starting Create Static Device Nodes in /dev...
Nov 25 09:43:55 localhost systemd[1]: Finished Create Static Device Nodes in /dev.

● systemd-tmpfiles-setup.service - Create Volatile Files and Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-setup.service; static)
     Active: active (exited) since Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)
   Main PID: 695 (code=exited, status=0/SUCCESS)
        CPU: 113ms

Nov 25 09:43:55 localhost systemd[1]: Starting Create Volatile Files and Directories...
Nov 25 09:43:55 localhost systemd[1]: Finished Create Volatile Files and Directories.

○ systemd-udev-settle.service - Wait for udev To Complete Device Initialization
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-settle.service; static)
     Active: inactive (dead)
       Docs: man:systemd-udev-settle.service(8)

● systemd-udev-trigger.service - Coldplug All udev Devices
     Loaded: loaded (/usr/lib/systemd/system/systemd-udev-trigger.service; static)
    Drop-In: /usr/lib/systemd/system/systemd-udev-trigger.service.d
             └─systemd-udev-trigger-no-reload.conf
     Active: active (exited) since Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:udev(7)
             man:systemd-udevd.service(8)
   Main PID: 683 (code=exited, status=0/SUCCESS)
        CPU: 96ms

Nov 25 09:43:55 localhost systemd[1]: Finished Coldplug All udev Devices.

● systemd-udevd.service - Rule-based Manager for Device Events and Files
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd.service; static)
     Active: active (running) since Tue 2025-11-25 09:43:56 UTC; 39min ago
TriggeredBy: ● systemd-udevd-kernel.socket
             ● systemd-udevd-control.socket
       Docs: man:systemd-udevd.service(8)
             man:udev(7)
   Main PID: 728 (systemd-udevd)
     Status: "Processing with 32 children at max"
         IO: 124.6M read, 45.4M written
      Tasks: 1
     Memory: 33.7M (peak: 90.3M)
        CPU: 4.539s
     CGroup: /system.slice/systemd-udevd.service
             └─udev
               └─728 /usr/lib/systemd/systemd-udevd

Nov 25 10:20:06 compute-0 lvm[86188]: PV /dev/loop4 online, VG ceph_vg1 is complete.
Nov 25 10:20:06 compute-0 lvm[86188]: VG ceph_vg1 finished
Nov 25 10:20:10 compute-0 lvm[87125]: PV /dev/loop5 online, VG ceph_vg2 is complete.
Nov 25 10:20:10 compute-0 lvm[87125]: VG ceph_vg2 finished
Nov 25 10:23:14 compute-0 lvm[106058]: PV /dev/loop5 online, VG ceph_vg2 is complete.
Nov 25 10:23:14 compute-0 lvm[106058]: VG ceph_vg2 finished
Nov 25 10:23:14 compute-0 lvm[106079]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Nov 25 10:23:14 compute-0 lvm[106079]: VG ceph_vg0 finished
Nov 25 10:23:14 compute-0 lvm[106094]: PV /dev/loop4 online, VG ceph_vg1 is complete.
Nov 25 10:23:14 compute-0 lvm[106094]: VG ceph_vg1 finished

● systemd-update-done.service - Update is Completed
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-done.service; static)
     Active: active (exited) since Tue 2025-11-25 09:43:56 UTC; 39min ago
Unit tlp.service could not be found.
       Docs: man:systemd-update-done.service(8)
   Main PID: 808 (code=exited, status=0/SUCCESS)
        CPU: 13ms

Nov 25 09:43:56 localhost systemd[1]: Starting Update is Completed...
Nov 25 09:43:56 localhost systemd[1]: Finished Update is Completed.

○ systemd-update-utmp-runlevel.service - Record Runlevel Change in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp-runlevel.service; static)
     Active: inactive (dead) since Tue 2025-11-25 09:44:04 UTC; 39min ago
       Docs: man:systemd-update-utmp-runlevel.service(8)
             man:utmp(5)
   Main PID: 1024 (code=exited, status=0/SUCCESS)
        CPU: 11ms

Nov 25 09:44:04 np0005534776.novalocal systemd[1]: Starting Record Runlevel Change in UTMP...
Nov 25 09:44:04 np0005534776.novalocal systemd[1]: systemd-update-utmp-runlevel.service: Deactivated successfully.
Nov 25 09:44:04 np0005534776.novalocal systemd[1]: Finished Record Runlevel Change in UTMP.

● systemd-update-utmp.service - Record System Boot/Shutdown in UTMP
     Loaded: loaded (/usr/lib/systemd/system/systemd-update-utmp.service; static)
     Active: active (exited) since Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:systemd-update-utmp.service(8)
             man:utmp(5)
   Main PID: 727 (code=exited, status=0/SUCCESS)
        CPU: 8ms

Nov 25 09:43:55 localhost systemd[1]: Starting Record System Boot/Shutdown in UTMP...
Nov 25 09:43:55 localhost systemd[1]: Finished Record System Boot/Shutdown in UTMP.

● systemd-user-sessions.service - Permit User Sessions
     Loaded: loaded (/usr/lib/systemd/system/systemd-user-sessions.service; static)
     Active: active (exited) since Tue 2025-11-25 09:44:04 UTC; 39min ago
       Docs: man:systemd-user-sessions.service(8)
   Main PID: 1010 (code=exited, status=0/SUCCESS)
        CPU: 11ms

Nov 25 09:44:04 np0005534776.novalocal systemd[1]: Starting Permit User Sessions...
Nov 25 09:44:04 np0005534776.novalocal systemd[1]: Finished Permit User Sessions.

○ systemd-vconsole-setup.service - Setup Virtual Console
     Loaded: loaded (/usr/lib/systemd/system/systemd-vconsole-setup.service; static)
     Active: inactive (dead) since Tue 2025-11-25 09:43:53 UTC; 39min ago
   Duration: 2.278s
       Docs: man:systemd-vconsole-setup.service(8)
             man:vconsole.conf(5)
   Main PID: 314 (code=exited, status=0/SUCCESS)
        CPU: 191ms

Nov 25 09:43:51 localhost systemd[1]: Finished Setup Virtual Console.
Nov 25 09:43:53 localhost systemd[1]: systemd-vconsole-setup.service: Deactivated successfully.
Nov 25 09:43:53 localhost systemd[1]: Stopped Setup Virtual Console.
Notice: journal has been rotated since unit was started, output may be incomplete.

● tuned.service - Dynamic System Tuning Daemon
     Loaded: loaded (/usr/lib/systemd/system/tuned.service; enabled; preset: enabled)
     Active: active (running) since Tue 2025-11-25 10:13:41 UTC; 9min ago
       Docs: man:tuned(8)
             man:tuned.conf(5)
             man:tuned-adm(8)
   Main PID: 43704 (tuned)
         IO: 0B read, 0B written
      Tasks: 4 (limit: 48640)
     Memory: 14.0M (peak: 16.1M)
        CPU: 699ms
     CGroup: /system.slice/tuned.service
             └─43704 /usr/bin/python3 -Es /usr/sbin/tuned -l -P

Nov 25 10:13:40 compute-0 systemd[1]: Starting Dynamic System Tuning Daemon...
Nov 25 10:13:41 compute-0 systemd[1]: Started Dynamic System Tuning Daemon.

○ unbound-anchor.service - update of the root trust anchor for DNSSEC validation in unbound
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.service; static)
     Active: inactive (dead)
TriggeredBy: ● unbound-anchor.timer
       Docs: man:unbound-anchor(8)

● user-runtime-dir@1000.service - User Runtime Directory /run/user/1000
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Tue 2025-11-25 09:44:39 UTC; 38min ago
       Docs: man:user@.service(5)
   Main PID: 4306 (code=exited, status=0/SUCCESS)
        CPU: 15ms

Nov 25 09:44:39 np0005534776.novalocal systemd[1]: Starting User Runtime Directory /run/user/1000...
Nov 25 09:44:39 np0005534776.novalocal systemd[1]: Finished User Runtime Directory /run/user/1000.

● user-runtime-dir@42477.service - User Runtime Directory /run/user/42477
     Loaded: loaded (/usr/lib/systemd/system/user-runtime-dir@.service; static)
     Active: active (exited) since Tue 2025-11-25 10:19:34 UTC; 3min 58s ago
       Docs: man:user@.service(5)
   Main PID: 76732 (code=exited, status=0/SUCCESS)
        CPU: 17ms

Nov 25 10:19:34 compute-0 systemd[1]: Starting User Runtime Directory /run/user/42477...
Nov 25 10:19:34 compute-0 systemd[1]: Finished User Runtime Directory /run/user/42477.

● user@1000.service - User Manager for UID 1000
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Tue 2025-11-25 09:44:39 UTC; 38min ago
       Docs: man:user@.service(5)
   Main PID: 4307 (systemd)
     Status: "Ready."
         IO: 676.0K read, 4.0K written
      Tasks: 5
     Memory: 9.2M (peak: 16.8M)
        CPU: 1.566s
     CGroup: /user.slice/user-1000.slice/user@1000.service
             ├─app.slice
             │ └─dbus-broker.service
             │   ├─12489 /usr/bin/dbus-broker-launch --scope user
             │   └─12506 dbus-broker --log 4 --controller 9 --machine-id fee38d0f94bf6f4b17ec77ba536bd6ab --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             ├─init.scope
             │ ├─4307 /usr/lib/systemd/systemd --user
             │ └─4309 "(sd-pam)"
             └─user.slice
               └─podman-pause-9f2ee473.scope
                 └─12394 catatonit -P

Nov 25 09:54:12 np0005534776.novalocal dbus-broker-launch[12489]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +31: Eavesdropping is deprecated and ignored
Nov 25 09:54:12 np0005534776.novalocal dbus-broker-launch[12489]: Policy to allow eavesdropping in /usr/share/dbus-1/session.conf +33: Eavesdropping is deprecated and ignored
Nov 25 09:54:12 np0005534776.novalocal systemd[4307]: Started D-Bus User Message Bus.
Nov 25 09:54:12 np0005534776.novalocal dbus-broker-lau[12489]: Ready
Nov 25 09:54:12 np0005534776.novalocal systemd[4307]: selinux: avc:  op=load_policy lsm=selinux seqno=6 res=1
Nov 25 09:54:12 np0005534776.novalocal systemd[4307]: Created slice Slice /user.
Nov 25 09:54:12 np0005534776.novalocal systemd[4307]: podman-12376.scope: unit configures an IP firewall, but not running as root.
Nov 25 09:54:12 np0005534776.novalocal systemd[4307]: (This warning is only shown for the first unit using IP firewalling.)
Nov 25 09:54:12 np0005534776.novalocal systemd[4307]: Started podman-12376.scope.
Nov 25 09:54:13 np0005534776.novalocal systemd[4307]: Started podman-pause-9f2ee473.scope.

● user@42477.service - User Manager for UID 42477
     Loaded: loaded (/usr/lib/systemd/system/user@.service; static)
    Drop-In: /usr/lib/systemd/system/user@.service.d
             └─10-login-barrier.conf
     Active: active (running) since Tue 2025-11-25 10:19:34 UTC; 3min 58s ago
       Docs: man:user@.service(5)
   Main PID: 76733 (systemd)
     Status: "Ready."
         IO: 0B read, 0B written
      Tasks: 2
     Memory: 7.0M (peak: 10.6M)
        CPU: 830ms
     CGroup: /user.slice/user-42477.slice/user@42477.service
             └─init.scope
               ├─76733 /usr/lib/systemd/systemd --user
               └─76736 "(sd-pam)"

Nov 25 10:19:34 compute-0 systemd[76733]: Starting Create User's Volatile Files and Directories...
Nov 25 10:19:34 compute-0 systemd[76733]: Finished Create User's Volatile Files and Directories.
Nov 25 10:19:34 compute-0 systemd[76733]: Listening on D-Bus User Message Bus Socket.
Nov 25 10:19:34 compute-0 systemd[76733]: Reached target Sockets.
Nov 25 10:19:34 compute-0 systemd[76733]: Reached target Basic System.
Nov 25 10:19:34 compute-0 systemd[76733]: Reached target Main User Target.
Nov 25 10:19:34 compute-0 systemd[76733]: Startup finished in 125ms.
Unit ypbind.service could not be found.
Unit yppasswdd.service could not be found.
Unit ypserv.service could not be found.
Unit ypxfrd.service could not be found.
Nov 25 10:19:34 compute-0 systemd[1]: Started User Manager for UID 42477.
Nov 25 10:21:34 compute-0 systemd[76733]: Starting Mark boot as successful...
Nov 25 10:21:34 compute-0 systemd[76733]: Finished Mark boot as successful.

● -.slice - Root Slice
     Loaded: loaded
     Active: active since Tue 2025-11-25 09:43:50 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:50 UTC; 39min ago
       Docs: man:systemd.special(7)
      Tasks: 1290
     Memory: 2.1G
        CPU: 15min 50.100s
     CGroup: /
             ├─108356 turbostat --debug sleep 10
             ├─108359 sleep 10
             ├─init.scope
             │ └─1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
             ├─system.slice
             │ ├─NetworkManager.service
             │ │ └─49043 /usr/sbin/NetworkManager --no-daemon
             │ ├─auditd.service
             │ │ ├─700 /sbin/auditd
             │ │ └─702 /usr/sbin/sedispatch
             │ ├─chronyd.service
             │ │ └─58618 /usr/sbin/chronyd -F 2
             │ ├─crond.service
             │ │ ├─ 1012 /usr/sbin/crond -n
             │ │ └─30899 /usr/sbin/anacron -s
             │ ├─dbus-broker.service
             │ │ ├─810 /usr/bin/dbus-broker-launch --scope system --audit
             │ │ └─811 dbus-broker --log 4 --controller 9 --machine-id fee38d0f94bf6f4b17ec77ba536bd6ab --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             │ ├─gssproxy.service
             │ │ └─885 /usr/sbin/gssproxy -D
             │ ├─irqbalance.service
             │ │ └─816 /usr/sbin/irqbalance
             │ ├─ovs-vswitchd.service
             │ │ └─47347 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             │ ├─ovsdb-server.service
             │ │ └─47265 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             │ ├─polkit.service
             │ │ └─43525 /usr/lib/polkit-1/polkitd --no-debug
             │ ├─rpcbind.service
             │ │ └─698 /usr/bin/rpcbind -w -f
             │ ├─rsyslog.service
             │ │ └─1008 /usr/sbin/rsyslogd -n
             │ ├─sshd.service
             │ │ └─1009 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             │ ├─system-ceph\x2d86651951\x2dd7ad\x2d54ed\x2d8d1a\x2d1eae0c6b599c.slice
             │ │ ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@crash.compute-0.service
             │ │ │ ├─libpod-payload-b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f
             │ │ │ │ ├─82984 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ │ └─82986 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ └─runtime
             │ │ │   └─82982 /usr/bin/conmon --api-version 1 -c b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f -u b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f/userdata -p /run/containers/storage/overlay-containers/b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f
             │ │ ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mds.cephfs.compute-0.avwmrm.service
             │ │ │ ├─libpod-payload-ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270
             │ │ │ │ ├─101346 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.avwmrm -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─101348 /usr/bin/ceph-mds -n mds.cephfs.compute-0.avwmrm -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─101344 /usr/bin/conmon --api-version 1 -c ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270 -u ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270/userdata -p /run/containers/storage/overlay-containers/ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-mds-cephfs-compute-0-avwmrm --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mds.cephfs.compute-0.avwmrm.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270
             │ │ ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mgr.compute-0.oomwtk.service
             │ │ │ ├─libpod-payload-50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5
             │ │ │ │ ├─75454 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.oomwtk -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─75456 /usr/bin/ceph-mgr -n mgr.compute-0.oomwtk -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─75452 /usr/bin/conmon --api-version 1 -c 50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5 -u 50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5/userdata -p /run/containers/storage/overlay-containers/50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-mgr-compute-0-oomwtk --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mgr.compute-0.oomwtk.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5
             │ │ ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mon.compute-0.service
             │ │ │ ├─libpod-payload-da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5
             │ │ │ │ ├─75164 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ │ └─75166 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─75162 /usr/bin/conmon --api-version 1 -c da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5 -u da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5/userdata -p /run/containers/storage/overlay-containers/da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5
             │ │ ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.0.service
             │ │ │ ├─libpod-payload-509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c
             │ │ │ │ ├─88858 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─88860 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─88856 /usr/bin/conmon --api-version 1 -c 509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c -u 509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c/userdata -p /run/containers/storage/overlay-containers/509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c
             │ │ ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.1.service
             │ │ │ ├─libpod-payload-53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3
             │ │ │ │ ├─89879 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─89881 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─89877 /usr/bin/conmon --api-version 1 -c 53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3 -u 53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3/userdata -p /run/containers/storage/overlay-containers/53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3
             │ │ ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.2.service
             │ │ │ ├─libpod-payload-a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181
             │ │ │ │ ├─90887 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ │ └─90889 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─runtime
             │ │ │   └─90885 /usr/bin/conmon --api-version 1 -c a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181 -u a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181/userdata -p /run/containers/storage/overlay-containers/a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181
             │ │ └─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@rgw.rgw.compute-0.nmzhvu.service
             │ │   ├─libpod-payload-37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6
             │ │   │ ├─100884 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.nmzhvu -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │   │ └─100886 /usr/bin/radosgw -n client.rgw.rgw.compute-0.nmzhvu -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │   └─runtime
             │ │     └─100882 /usr/bin/conmon --api-version 1 -c 37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6 -u 37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6/userdata -p /run/containers/storage/overlay-containers/37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-rgw-rgw-compute-0-nmzhvu --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@rgw.rgw.compute-0.nmzhvu.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6
             │ ├─system-getty.slice
             │ │ └─getty@tty1.service
             │ │   └─1013 /sbin/agetty -o "-p -- \\u" --noclear - linux
             │ ├─system-serial\x2dgetty.slice
             │ │ └─serial-getty@ttyS0.service
             │ │   └─2566 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             │ ├─systemd-hostnamed.service
             │ │ └─106828 /usr/lib/systemd/systemd-hostnamed
             │ ├─systemd-journald.service
             │ │ └─678 /usr/lib/systemd/systemd-journald
             │ ├─systemd-logind.service
             │ │ └─820 /usr/lib/systemd/systemd-logind
             │ ├─systemd-udevd.service
             │ │ └─udev
             │ │   └─728 /usr/lib/systemd/systemd-udevd
             │ └─tuned.service
             │   └─43704 /usr/bin/python3 -Es /usr/sbin/tuned -l -P
             └─user.slice
               ├─user-1000.slice
               │ ├─session-1.scope
               │ │ └─4518 /usr/bin/python3
               │ ├─session-17.scope
               │ │ ├─71412 "sshd-session: zuul [priv]"
               │ │ └─71415 "sshd-session: zuul@notty"
               │ ├─session-33.scope
               │ │ ├─105525 "sshd-session: zuul [priv]"
               │ │ ├─105528 "sshd-session: zuul@notty"
               │ │ ├─105529 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
               │ │ ├─105553 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
               │ │ ├─108355 timeout 15s turbostat --debug sleep 10
               │ │ ├─108762 timeout 300s systemctl status --all
               │ │ ├─108763 systemctl status --all
               │ │ ├─108764 timeout 300s semanage interface -l
               │ │ └─108765 /usr/bin/python3 -EsI /usr/sbin/semanage interface -l
               │ └─user@1000.service
               │   ├─app.slice
               │   │ └─dbus-broker.service
               │   │   ├─12489 /usr/bin/dbus-broker-launch --scope user
               │   │   └─12506 dbus-broker --log 4 --controller 9 --machine-id fee38d0f94bf6f4b17ec77ba536bd6ab --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               │   ├─init.scope
               │   │ ├─4307 /usr/lib/systemd/systemd --user
               │   │ └─4309 "(sd-pam)"
               │   └─user.slice
               │     └─podman-pause-9f2ee473.scope
               │       └─12394 catatonit -P
               └─user-42477.slice
                 ├─session-20.scope
                 │ ├─76729 "sshd-session: ceph-admin [priv]"
                 │ └─76752 "sshd-session: ceph-admin"
                 ├─session-22.scope
                 │ ├─76747 "sshd-session: ceph-admin [priv]"
                 │ └─76753 "sshd-session: ceph-admin@notty"
                 ├─session-23.scope
                 │ ├─76804 "sshd-session: ceph-admin [priv]"
                 │ └─76807 "sshd-session: ceph-admin@notty"
                 ├─session-24.scope
                 │ ├─76858 "sshd-session: ceph-admin [priv]"
                 │ └─76861 "sshd-session: ceph-admin@notty"
                 ├─session-25.scope
                 │ ├─76912 "sshd-session: ceph-admin [priv]"
                 │ └─76915 "sshd-session: ceph-admin@notty"
                 ├─session-26.scope
                 │ ├─76966 "sshd-session: ceph-admin [priv]"
                 │ └─76969 "sshd-session: ceph-admin@notty"
                 ├─session-27.scope
                 │ ├─77020 "sshd-session: ceph-admin [priv]"
                 │ └─77023 "sshd-session: ceph-admin@notty"
                 ├─session-28.scope
                 │ ├─77074 "sshd-session: ceph-admin [priv]"
                 │ └─77077 "sshd-session: ceph-admin@notty"
                 ├─session-29.scope
                 │ ├─77128 "sshd-session: ceph-admin [priv]"
                 │ └─77131 "sshd-session: ceph-admin@notty"
                 ├─session-30.scope
                 │ ├─77182 "sshd-session: ceph-admin [priv]"
                 │ └─77185 "sshd-session: ceph-admin@notty"
                 ├─session-31.scope
                 │ ├─77209 "sshd-session: ceph-admin [priv]"
                 │ └─77212 "sshd-session: ceph-admin@notty"
                 ├─session-32.scope
                 │ ├─77263 "sshd-session: ceph-admin [priv]"
                 │ └─77266 "sshd-session: ceph-admin@notty"
                 └─user@42477.service
                   └─init.scope
                     ├─76733 /usr/lib/systemd/systemd --user
                     └─76736 "(sd-pam)"

Nov 25 10:22:45 compute-0 systemd[1]: var-lib-containers-storage-overlay-89fe432e4be816f92b04c86830e75b38fc47de13d02f7add26c604a0e90f4941-merged.mount: Deactivated successfully.
Nov 25 10:22:45 compute-0 systemd[1]: libpod-conmon-56584dd1178cbf0ac05ccd30ccb9408f8c01a098b370e0743280ab45dfd65d4b.scope: Deactivated successfully.
Nov 25 10:22:46 compute-0 systemd[1]: Started libpod-conmon-f87ebeb79efd0301f20165a35827c52b1fe3d2422e6d2d79040e29a142d471cc.scope.
Nov 25 10:22:46 compute-0 systemd[1]: Started libcrun container.
Nov 25 10:22:47 compute-0 systemd[1]: libpod-f87ebeb79efd0301f20165a35827c52b1fe3d2422e6d2d79040e29a142d471cc.scope: Deactivated successfully.
Nov 25 10:22:47 compute-0 systemd[1]: var-lib-containers-storage-overlay-a37b201cda1e39348ff64384d03f527a19d3ae981b1104bb0b7fab9c142c620d-merged.mount: Deactivated successfully.
Nov 25 10:22:47 compute-0 systemd[1]: libpod-conmon-f87ebeb79efd0301f20165a35827c52b1fe3d2422e6d2d79040e29a142d471cc.scope: Deactivated successfully.
Nov 25 10:23:08 compute-0 systemd[1]: Started Session 33 of User zuul.
Nov 25 10:23:19 compute-0 systemd[1]: Starting Hostname Service...
Nov 25 10:23:19 compute-0 systemd[1]: Started Hostname Service.

● machine.slice - Virtual Machine and Container Slice
     Loaded: loaded (/usr/lib/systemd/system/machine.slice; static)
     Active: active since Tue 2025-11-25 10:18:31 UTC; 5min ago
      Until: Tue 2025-11-25 10:18:31 UTC; 5min ago
       Docs: man:systemd.special(7)
         IO: 86.4M read, 17.2M written
      Tasks: 0
     Memory: 21.0M (peak: 199.0M)
        CPU: 1min 24.352s
     CGroup: /machine.slice

Nov 25 10:22:47 compute-0 xenodochial_blackburn[105425]:     "921c18b1-5d01-4df4-b435-89254be1776d": {
Nov 25 10:22:47 compute-0 xenodochial_blackburn[105425]:         "ceph_fsid": "86651951-d7ad-54ed-8d1a-1eae0c6b599c",
Nov 25 10:22:47 compute-0 xenodochial_blackburn[105425]:         "device": "/dev/mapper/ceph_vg0-ceph_lv0",
Nov 25 10:22:47 compute-0 xenodochial_blackburn[105425]:         "osd_id": 0,
Nov 25 10:22:47 compute-0 xenodochial_blackburn[105425]:         "osd_uuid": "921c18b1-5d01-4df4-b435-89254be1776d",
Nov 25 10:22:47 compute-0 xenodochial_blackburn[105425]:         "type": "bluestore"
Nov 25 10:22:47 compute-0 xenodochial_blackburn[105425]:     }
Nov 25 10:22:47 compute-0 xenodochial_blackburn[105425]: }
Nov 25 10:22:47 compute-0 podman[105458]: 2025-11-25 10:22:47.12366741 +0000 UTC m=+0.023395899 container died f87ebeb79efd0301f20165a35827c52b1fe3d2422e6d2d79040e29a142d471cc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=xenodochial_blackburn, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_REF=reef, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, org.label-schema.build-date=20250507, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2)
Nov 25 10:22:47 compute-0 podman[105458]: 2025-11-25 10:22:47.174135528 +0000 UTC m=+0.073864027 container remove f87ebeb79efd0301f20165a35827c52b1fe3d2422e6d2d79040e29a142d471cc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=xenodochial_blackburn, org.label-schema.build-date=20250507, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.39.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)

● system-ceph\x2d86651951\x2dd7ad\x2d54ed\x2d8d1a\x2d1eae0c6b599c.slice - Slice /system/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c
     Loaded: loaded
     Active: active since Tue 2025-11-25 10:18:36 UTC; 4min 56s ago
      Until: Tue 2025-11-25 10:18:36 UTC; 4min 56s ago
         IO: 14.3M read, 3.8G written
      Tasks: 991
     Memory: 2.1G (peak: 2.1G)
        CPU: 1min 17.195s
     CGroup: /system.slice/system-ceph\x2d86651951\x2dd7ad\x2d54ed\x2d8d1a\x2d1eae0c6b599c.slice
             ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@crash.compute-0.service
             │ ├─libpod-payload-b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f
             │ │ ├─82984 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ └─82986 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ └─runtime
             │   └─82982 /usr/bin/conmon --api-version 1 -c b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f -u b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f/userdata -p /run/containers/storage/overlay-containers/b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f
             ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mds.cephfs.compute-0.avwmrm.service
             │ ├─libpod-payload-ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270
             │ │ ├─101346 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.avwmrm -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─101348 /usr/bin/ceph-mds -n mds.cephfs.compute-0.avwmrm -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─101344 /usr/bin/conmon --api-version 1 -c ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270 -u ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270/userdata -p /run/containers/storage/overlay-containers/ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-mds-cephfs-compute-0-avwmrm --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mds.cephfs.compute-0.avwmrm.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270
             ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mgr.compute-0.oomwtk.service
             │ ├─libpod-payload-50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5
             │ │ ├─75454 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.oomwtk -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─75456 /usr/bin/ceph-mgr -n mgr.compute-0.oomwtk -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─75452 /usr/bin/conmon --api-version 1 -c 50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5 -u 50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5/userdata -p /run/containers/storage/overlay-containers/50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-mgr-compute-0-oomwtk --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mgr.compute-0.oomwtk.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5
             ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mon.compute-0.service
             │ ├─libpod-payload-da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5
             │ │ ├─75164 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─75166 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ └─runtime
             │   └─75162 /usr/bin/conmon --api-version 1 -c da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5 -u da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5/userdata -p /run/containers/storage/overlay-containers/da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5
             ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.0.service
             │ ├─libpod-payload-509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c
             │ │ ├─88858 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─88860 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─88856 /usr/bin/conmon --api-version 1 -c 509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c -u 509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c/userdata -p /run/containers/storage/overlay-containers/509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c
             ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.1.service
             │ ├─libpod-payload-53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3
             │ │ ├─89879 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─89881 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─89877 /usr/bin/conmon --api-version 1 -c 53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3 -u 53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3/userdata -p /run/containers/storage/overlay-containers/53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3
             ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.2.service
             │ ├─libpod-payload-a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181
             │ │ ├─90887 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─90889 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ └─runtime
             │   └─90885 /usr/bin/conmon --api-version 1 -c a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181 -u a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181/userdata -p /run/containers/storage/overlay-containers/a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181
             └─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@rgw.rgw.compute-0.nmzhvu.service
               ├─libpod-payload-37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6
               │ ├─100884 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.nmzhvu -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
               │ └─100886 /usr/bin/radosgw -n client.rgw.rgw.compute-0.nmzhvu -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
               └─runtime
                 └─100882 /usr/bin/conmon --api-version 1 -c 37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6 -u 37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6/userdata -p /run/containers/storage/overlay-containers/37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-rgw-rgw-compute-0-nmzhvu --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@rgw.rgw.compute-0.nmzhvu.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6

Nov 25 10:23:32 compute-0 ceph-mgr[75456]: log_channel(cluster) log [DBG] : pgmap v230: 305 pgs: 305 active+clean; 455 KiB data, 140 MiB used, 60 GiB / 60 GiB avail
Nov 25 10:23:32 compute-0 ceph-mon[75166]: mon.compute-0@0(leader) e1 handle_command mon_command({"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "31"} v 0) v1
Nov 25 10:23:32 compute-0 ceph-mon[75166]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/1840679491' entity='mgr.compute-0.oomwtk' cmd=[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "31"}]: dispatch
Nov 25 10:23:32 compute-0 ceph-osd[88860]: log_channel(cluster) log [DBG] : 10.9 scrub starts
Nov 25 10:23:32 compute-0 ceph-osd[88860]: log_channel(cluster) log [DBG] : 10.9 scrub ok
Nov 25 10:23:32 compute-0 ceph-mon[75166]: mon.compute-0@0(leader).osd e117 do_prune osdmap full prune enabled
Nov 25 10:23:32 compute-0 ceph-mon[75166]: log_channel(audit) log [INF] : from='mgr.14130 192.168.122.100:0/1840679491' entity='mgr.compute-0.oomwtk' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.log", "var": "pgp_num_actual", "val": "31"}]': finished
Nov 25 10:23:32 compute-0 ceph-mon[75166]: mon.compute-0@0(leader).osd e118 e118: 3 total, 3 up, 3 in
Nov 25 10:23:32 compute-0 ceph-mon[75166]: log_channel(cluster) log [DBG] : osdmap e118: 3 total, 3 up, 3 in
Nov 25 10:23:32 compute-0 ceph-osd[88860]: osd.0 pg_epoch: 118 pg[9.1c( v 45'385 (0'0,45'385] local-lis/les=117/118 n=5 ec=50/35 lis/c=115/85 les/c/f=116/86/0 sis=117) [0] r=0 lpr=117 pi=[85,117)/1 crt=45'385 mlcod 0'0 active mbc={}] state<Started/Primary/Active>: react AllReplicasActivated Activating complete

● system-getty.slice - Slice /system/getty
     Loaded: loaded
     Active: active since Tue 2025-11-25 09:43:54 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:54 UTC; 39min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 300.0K (peak: 540.0K)
        CPU: 9ms
     CGroup: /system.slice/system-getty.slice
             └─getty@tty1.service
               └─1013 /sbin/agetty -o "-p -- \\u" --noclear - linux

● system-modprobe.slice - Slice /system/modprobe
     Loaded: loaded
     Active: active since Tue 2025-11-25 09:43:51 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:51 UTC; 39min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 412.0K (peak: 12.2M)
        CPU: 145ms
     CGroup: /system.slice/system-modprobe.slice

Nov 25 09:43:51 localhost systemd[1]: Created slice Slice /system/modprobe.

● system-serial\x2dgetty.slice - Slice /system/serial-getty
     Loaded: loaded
     Active: active since Tue 2025-11-25 09:43:54 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:54 UTC; 39min ago
         IO: 0B read, 0B written
      Tasks: 1
     Memory: 260.0K (peak: 748.0K)
        CPU: 8ms
     CGroup: /system.slice/system-serial\x2dgetty.slice
             └─serial-getty@ttyS0.service
               └─2566 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220

● system-sshd\x2dkeygen.slice - Slice /system/sshd-keygen
     Loaded: loaded
     Active: active since Tue 2025-11-25 09:43:54 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:54 UTC; 39min ago
         IO: 0B read, 0B written
      Tasks: 0
     Memory: 0B (peak: 0B)
        CPU: 0
     CGroup: /system.slice/system-sshd\x2dkeygen.slice

● system.slice - System Slice
     Loaded: loaded
     Active: active since Tue 2025-11-25 09:43:50 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:50 UTC; 39min ago
       Docs: man:systemd.special(7)
         IO: 190.2M read, 3.9G written
      Tasks: 1052
     Memory: 2.7G (peak: 2.7G)
        CPU: 3min 31.618s
     CGroup: /system.slice
             ├─NetworkManager.service
             │ └─49043 /usr/sbin/NetworkManager --no-daemon
             ├─auditd.service
             │ ├─700 /sbin/auditd
             │ └─702 /usr/sbin/sedispatch
             ├─chronyd.service
             │ └─58618 /usr/sbin/chronyd -F 2
             ├─crond.service
             │ ├─ 1012 /usr/sbin/crond -n
             │ └─30899 /usr/sbin/anacron -s
             ├─dbus-broker.service
             │ ├─810 /usr/bin/dbus-broker-launch --scope system --audit
             │ └─811 dbus-broker --log 4 --controller 9 --machine-id fee38d0f94bf6f4b17ec77ba536bd6ab --max-bytes 536870912 --max-fds 4096 --max-matches 131072 --audit
             ├─gssproxy.service
             │ └─885 /usr/sbin/gssproxy -D
             ├─irqbalance.service
             │ └─816 /usr/sbin/irqbalance
             ├─ovs-vswitchd.service
             │ └─47347 ovs-vswitchd unix:/var/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovs-vswitchd.log --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
             ├─ovsdb-server.service
             │ └─47265 ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/var/run/openvswitch/db.sock --private-key=db:Open_vSwitch,SSL,private_key --certificate=db:Open_vSwitch,SSL,certificate --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert --user openvswitch:hugetlbfs --no-chdir --log-file=/var/log/openvswitch/ovsdb-server.log --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
             ├─polkit.service
             │ └─43525 /usr/lib/polkit-1/polkitd --no-debug
             ├─rpcbind.service
             │ └─698 /usr/bin/rpcbind -w -f
             ├─rsyslog.service
             │ └─1008 /usr/sbin/rsyslogd -n
             ├─sshd.service
             │ └─1009 "sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups"
             ├─system-ceph\x2d86651951\x2dd7ad\x2d54ed\x2d8d1a\x2d1eae0c6b599c.slice
             │ ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@crash.compute-0.service
             │ │ ├─libpod-payload-b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f
             │ │ │ ├─82984 /run/podman-init -- /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ │ └─82986 /usr/bin/python3 -s /usr/bin/ceph-crash -n client.crash.compute-0
             │ │ └─runtime
             │ │   └─82982 /usr/bin/conmon --api-version 1 -c b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f -u b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f/userdata -p /run/containers/storage/overlay-containers/b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-crash-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@crash.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg b14c4d0f9a091cf8a47fce60a36e80426e9a0297b91362c53f6634918d26917f
             │ ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mds.cephfs.compute-0.avwmrm.service
             │ │ ├─libpod-payload-ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270
             │ │ │ ├─101346 /run/podman-init -- /usr/bin/ceph-mds -n mds.cephfs.compute-0.avwmrm -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─101348 /usr/bin/ceph-mds -n mds.cephfs.compute-0.avwmrm -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─101344 /usr/bin/conmon --api-version 1 -c ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270 -u ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270/userdata -p /run/containers/storage/overlay-containers/ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-mds-cephfs-compute-0-avwmrm --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mds.cephfs.compute-0.avwmrm.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg ada03e038ff7afa81ce99514785a1f7f23ec1c616e608a0488afc3c61f4ad270
             │ ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mgr.compute-0.oomwtk.service
             │ │ ├─libpod-payload-50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5
             │ │ │ ├─75454 /run/podman-init -- /usr/bin/ceph-mgr -n mgr.compute-0.oomwtk -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─75456 /usr/bin/ceph-mgr -n mgr.compute-0.oomwtk -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─75452 /usr/bin/conmon --api-version 1 -c 50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5 -u 50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5/userdata -p /run/containers/storage/overlay-containers/50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-mgr-compute-0-oomwtk --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mgr.compute-0.oomwtk.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend 
--exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 50fa81e3de3bbed9014b0ada17a3661d687d8263d45aa4c2cb331c53aa6e66c5
             │ ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mon.compute-0.service
             │ │ ├─libpod-payload-da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5
             │ │ │ ├─75164 /run/podman-init -- /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ │ └─75166 /usr/bin/ceph-mon -n mon.compute-0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false
             │ │ └─runtime
             │ │   └─75162 /usr/bin/conmon --api-version 1 -c da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5 -u da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5/userdata -p /run/containers/storage/overlay-containers/da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-mon-compute-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@mon.compute-0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg 
journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg da26e6b5eb184e66ff0f01e858a9ba09086081279bc61dcc18804f12bf2746e5
             │ ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.0.service
             │ │ ├─libpod-payload-509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c
             │ │ │ ├─88858 /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─88860 /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─88856 /usr/bin/conmon --api-version 1 -c 509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c -u 509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c/userdata -p /run/containers/storage/overlay-containers/509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-osd-0 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.0.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 509125080fc4a991dad78f48c3bcc9760616adb551f766f6768ee97cdf6f087c
             │ ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.1.service
             │ │ ├─libpod-payload-53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3
             │ │ │ ├─89879 /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─89881 /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─89877 /usr/bin/conmon --api-version 1 -c 53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3 -u 53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3/userdata -p /run/containers/storage/overlay-containers/53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-osd-1 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.1.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 53527432356f881cb3f7f18463b7737b38bc93fa92677dff67e318c4cdec67e3
             │ ├─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.2.service
             │ │ ├─libpod-payload-a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181
             │ │ │ ├─90887 /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ │ └─90889 /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │ │ └─runtime
             │ │   └─90885 /usr/bin/conmon --api-version 1 -c a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181 -u a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181/userdata -p /run/containers/storage/overlay-containers/a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-osd-2 --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@osd.2.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald 
--exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg a38f2f8c61e9843df057582eeccd529fb52fee544a2f2daa3caa5266718af181
             │ └─ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@rgw.rgw.compute-0.nmzhvu.service
             │   ├─libpod-payload-37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6
             │   │ ├─100884 /run/podman-init -- /usr/bin/radosgw -n client.rgw.rgw.compute-0.nmzhvu -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │   │ └─100886 /usr/bin/radosgw -n client.rgw.rgw.compute-0.nmzhvu -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false
             │   └─runtime
             │     └─100882 /usr/bin/conmon --api-version 1 -c 37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6 -u 37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6/userdata -p /run/containers/storage/overlay-containers/37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6/userdata/pidfile -n ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c-rgw-rgw-compute-0-nmzhvu --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6 --full-attach -l journald --log-level warning --syslog --runtime-arg --log-format=json --runtime-arg --log --runtime-arg=/run/containers/storage/overlay-containers/37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6/userdata/oci-log --conmon-pidfile /run/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c@rgw.rgw.compute-0.nmzhvu.service-pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg warning --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg "" --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --hooks-dir --exit-command-arg /usr/share/containers/oci/hooks.d --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg 
--events-backend --exit-command-arg journald --exit-command-arg container --exit-command-arg cleanup --exit-command-arg --stopped-only --exit-command-arg --rm --exit-command-arg 37d8d4d4e35122aa7398826ce5305d40a9c1b412301945f14e7b5b9738d21ef6
             ├─system-getty.slice
             │ └─getty@tty1.service
             │   └─1013 /sbin/agetty -o "-p -- \\u" --noclear - linux
             ├─system-serial\x2dgetty.slice
             │ └─serial-getty@ttyS0.service
             │   └─2566 /sbin/agetty -o "-p -- \\u" --keep-baud 115200,57600,38400,9600 - vt220
             ├─systemd-hostnamed.service
             │ └─106828 /usr/lib/systemd/systemd-hostnamed
             ├─systemd-journald.service
             │ └─678 /usr/lib/systemd/systemd-journald
             ├─systemd-logind.service
             │ └─820 /usr/lib/systemd/systemd-logind
             ├─systemd-udevd.service
             │ └─udev
             │   └─728 /usr/lib/systemd/systemd-udevd
             └─tuned.service
               └─43704 /usr/bin/python3 -Es /usr/sbin/tuned -l -P

Nov 25 10:23:01 compute-0 sshd-session[105523]: Disconnected from authenticating user root 200.8.228.57 port 56966 [preauth]
Nov 25 10:23:08 compute-0 sshd-session[105525]: Accepted publickey for zuul from 192.168.122.10 port 57774 ssh2: ECDSA SHA256:oovU9KaTaKTik/Ga7gdISZ8d5PJoBUjNphenPJURrck
Nov 25 10:23:08 compute-0 systemd-logind[820]: New session 33 of user zuul.
Nov 25 10:23:08 compute-0 sshd-session[105525]: pam_unix(sshd:session): session opened for user zuul(uid=1000) by zuul(uid=0)
Nov 25 10:23:14 compute-0 lvm[106058]: PV /dev/loop5 online, VG ceph_vg2 is complete.
Nov 25 10:23:14 compute-0 lvm[106058]: VG ceph_vg2 finished
Nov 25 10:23:14 compute-0 lvm[106079]: PV /dev/loop3 online, VG ceph_vg0 is complete.
Nov 25 10:23:14 compute-0 lvm[106079]: VG ceph_vg0 finished
Nov 25 10:23:14 compute-0 lvm[106094]: PV /dev/loop4 online, VG ceph_vg1 is complete.
Nov 25 10:23:14 compute-0 lvm[106094]: VG ceph_vg1 finished

● user-1000.slice - User Slice of UID 1000
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Tue 2025-11-25 09:44:39 UTC; 38min ago
      Until: Tue 2025-11-25 09:44:39 UTC; 38min ago
       Docs: man:user@.service(5)
         IO: 299.1M read, 4.3G written
      Tasks: 23 (limit: 20064)
     Memory: 3.3G (peak: 3.5G)
        CPU: 9min 18.213s
     CGroup: /user.slice/user-1000.slice
             ├─session-1.scope
             │ └─4518 /usr/bin/python3
             ├─session-17.scope
             │ ├─71412 "sshd-session: zuul [priv]"
             │ └─71415 "sshd-session: zuul@notty"
             ├─session-33.scope
             │ ├─105525 "sshd-session: zuul [priv]"
             │ ├─105528 "sshd-session: zuul@notty"
             │ ├─105529 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ ├─105553 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ ├─108355 timeout 15s turbostat --debug sleep 10
             │ ├─108762 timeout 300s systemctl status --all
             │ ├─108763 systemctl status --all
             │ ├─108764 timeout 300s semanage interface -l
             │ └─108765 "[semanage]"
             └─user@1000.service
               ├─app.slice
               │ └─dbus-broker.service
               │   ├─12489 /usr/bin/dbus-broker-launch --scope user
               │   └─12506 dbus-broker --log 4 --controller 9 --machine-id fee38d0f94bf6f4b17ec77ba536bd6ab --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
               ├─init.scope
               │ ├─4307 /usr/lib/systemd/systemd --user
               │ └─4309 "(sd-pam)"
               └─user.slice
                 └─podman-pause-9f2ee473.scope
                   └─12394 catatonit -P

Nov 25 10:21:33 compute-0 podman[103643]: 2025-11-25 10:21:33.495572851 +0000 UTC m=+0.150043646 container attach e598db5de9f634ee14f58ccec8aec9e3c39459ce19a85ffa417416c9bddb013e (image=quay.io/ceph/ceph:v18, name=naughty_feistel, io.buildah.version=1.39.3, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.build-date=20250507, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.label-schema.schema-version=1.0, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Nov 25 10:21:34 compute-0 podman[103643]: 2025-11-25 10:21:34.096737857 +0000 UTC m=+0.751208652 container died e598db5de9f634ee14f58ccec8aec9e3c39459ce19a85ffa417416c9bddb013e (image=quay.io/ceph/ceph:v18, name=naughty_feistel, org.label-schema.build-date=20250507, org.label-schema.schema-version=1.0, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_REF=reef, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/)
Nov 25 10:21:34 compute-0 podman[103643]: 2025-11-25 10:21:34.133680889 +0000 UTC m=+0.788151684 container remove e598db5de9f634ee14f58ccec8aec9e3c39459ce19a85ffa417416c9bddb013e (image=quay.io/ceph/ceph:v18, name=naughty_feistel, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, OSD_FLAVOR=default, ceph=True, CEPH_REF=reef, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, io.buildah.version=1.39.3, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>)
Nov 25 10:21:34 compute-0 sudo[103609]: pam_unix(sudo:session): session closed for user root
Nov 25 10:23:08 compute-0 sudo[105529]:     zuul : PWD=/home/zuul ; USER=root ; COMMAND=/bin/bash -c 'rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt'
Nov 25 10:23:08 compute-0 sudo[105529]: pam_unix(sudo:session): session opened for user root(uid=0) by zuul(uid=1000)
Nov 25 10:23:12 compute-0 ovs-vsctl[105728]: ovs|00001|db_ctl_base|ERR|no key "dpdk-init" in Open_vSwitch record "." column other_config
Nov 25 10:23:23 compute-0 ovs-appctl[107646]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Nov 25 10:23:23 compute-0 ovs-appctl[107653]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory
Nov 25 10:23:23 compute-0 ovs-appctl[107658]: ovs|00001|daemon_unix|WARN|/var/run/openvswitch/ovs-monitor-ipsec.pid: open: No such file or directory

● user-42477.slice - User Slice of UID 42477
     Loaded: loaded
    Drop-In: /usr/lib/systemd/system/user-.slice.d
             └─10-defaults.conf
     Active: active since Tue 2025-11-25 10:19:34 UTC; 3min 58s ago
      Until: Tue 2025-11-25 10:19:34 UTC; 3min 58s ago
       Docs: man:user@.service(5)
         IO: 48.0K read, 34.7M written
      Tasks: 26 (limit: 20064)
     Memory: 23.9M (peak: 83.4M)
        CPU: 58.656s
     CGroup: /user.slice/user-42477.slice
             ├─session-20.scope
             │ ├─76729 "sshd-session: ceph-admin [priv]"
             │ └─76752 "sshd-session: ceph-admin"
             ├─session-22.scope
             │ ├─76747 "sshd-session: ceph-admin [priv]"
             │ └─76753 "sshd-session: ceph-admin@notty"
             ├─session-23.scope
             │ ├─76804 "sshd-session: ceph-admin [priv]"
             │ └─76807 "sshd-session: ceph-admin@notty"
             ├─session-24.scope
             │ ├─76858 "sshd-session: ceph-admin [priv]"
             │ └─76861 "sshd-session: ceph-admin@notty"
             ├─session-25.scope
             │ ├─76912 "sshd-session: ceph-admin [priv]"
             │ └─76915 "sshd-session: ceph-admin@notty"
             ├─session-26.scope
             │ ├─76966 "sshd-session: ceph-admin [priv]"
             │ └─76969 "sshd-session: ceph-admin@notty"
             ├─session-27.scope
             │ ├─77020 "sshd-session: ceph-admin [priv]"
             │ └─77023 "sshd-session: ceph-admin@notty"
             ├─session-28.scope
             │ ├─77074 "sshd-session: ceph-admin [priv]"
             │ └─77077 "sshd-session: ceph-admin@notty"
             ├─session-29.scope
             │ ├─77128 "sshd-session: ceph-admin [priv]"
             │ └─77131 "sshd-session: ceph-admin@notty"
             ├─session-30.scope
             │ ├─77182 "sshd-session: ceph-admin [priv]"
             │ └─77185 "sshd-session: ceph-admin@notty"
             ├─session-31.scope
             │ ├─77209 "sshd-session: ceph-admin [priv]"
             │ └─77212 "sshd-session: ceph-admin@notty"
             ├─session-32.scope
             │ ├─77263 "sshd-session: ceph-admin [priv]"
             │ └─77266 "sshd-session: ceph-admin@notty"
             └─user@42477.service
               └─init.scope
                 ├─76733 /usr/lib/systemd/systemd --user
                 └─76736 "(sd-pam)"

Nov 25 10:22:46 compute-0 podman[105409]: 2025-11-25 10:22:45.987120009 +0000 UTC m=+0.020799455 image pull 0f5473a1e726b0feaff0f41f8de8341c0a94f60365d4584f4c10bd6b40d44bc1 quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
Nov 25 10:22:46 compute-0 podman[105409]: 2025-11-25 10:22:46.086649459 +0000 UTC m=+0.120328915 container start f87ebeb79efd0301f20165a35827c52b1fe3d2422e6d2d79040e29a142d471cc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=xenodochial_blackburn, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, io.buildah.version=1.39.3, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20250507, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.schema-version=1.0)
Nov 25 10:22:46 compute-0 podman[105409]: 2025-11-25 10:22:46.090957435 +0000 UTC m=+0.124636861 container attach f87ebeb79efd0301f20165a35827c52b1fe3d2422e6d2d79040e29a142d471cc (image=quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0, name=xenodochial_blackburn, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=reef, org.label-schema.schema-version=1.0, org.label-schema.build-date=20250507, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.39.3, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team <ceph-maintainers@ceph.io>, CEPH_GIT_REPO=https://github.com/ceph/ceph.git, CEPH_SHA1=6b0e988052ec84cf2d4a54ff9bbbc5e720b621ad, org.label-schema.license=GPLv2)
Nov 25 10:22:47 compute-0 sudo[105305]: pam_unix(sudo:session): session closed for user root
Nov 25 10:22:47 compute-0 sudo[105471]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/true
Nov 25 10:22:47 compute-0 sudo[105471]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 25 10:22:47 compute-0 sudo[105471]: pam_unix(sudo:session): session closed for user root
Nov 25 10:22:47 compute-0 sudo[105496]: ceph-admin : PWD=/home/ceph-admin ; USER=root ; COMMAND=/bin/ls /etc/sysctl.d
Nov 25 10:22:47 compute-0 sudo[105496]: pam_unix(sudo:session): session opened for user root(uid=0) by ceph-admin(uid=42477)
Nov 25 10:22:47 compute-0 sudo[105496]: pam_unix(sudo:session): session closed for user root

● user.slice - User and Session Slice
     Loaded: loaded (/usr/lib/systemd/system/user.slice; static)
     Active: active since Tue 2025-11-25 09:43:54 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:54 UTC; 39min ago
       Docs: man:systemd.special(7)
         IO: 299.1M read, 4.4G written
      Tasks: 48
     Memory: 3.4G (peak: 3.5G)
        CPU: 10min 17.149s
     CGroup: /user.slice
             ├─user-1000.slice
             │ ├─session-1.scope
             │ │ └─4518 /usr/bin/python3
             │ ├─session-17.scope
             │ │ ├─71412 "sshd-session: zuul [priv]"
             │ │ └─71415 "sshd-session: zuul@notty"
             │ ├─session-33.scope
             │ │ ├─105525 "sshd-session: zuul [priv]"
             │ │ ├─105528 "sshd-session: zuul@notty"
             │ │ ├─105529 sudo bash -c "rm -rf /var/tmp/sos-osp && mkdir /var/tmp/sos-osp && sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp  -p container,openstack_edpm,system,storage,virt"
             │ │ ├─105553 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ │ ├─108355 timeout 15s turbostat --debug sleep 10
             │ │ ├─108762 timeout 300s systemctl status --all
             │ │ ├─108763 systemctl status --all
             │ │ └─108766 /usr/bin/python3 -s /sbin/sos report --batch --all-logs --tmp-dir=/var/tmp/sos-osp -p container,openstack_edpm,system,storage,virt
             │ └─user@1000.service
             │   ├─app.slice
             │   │ └─dbus-broker.service
             │   │   ├─12489 /usr/bin/dbus-broker-launch --scope user
             │   │   └─12506 dbus-broker --log 4 --controller 9 --machine-id fee38d0f94bf6f4b17ec77ba536bd6ab --max-bytes 100000000000000 --max-fds 25000000000000 --max-matches 5000000000
             │   ├─init.scope
             │   │ ├─4307 /usr/lib/systemd/systemd --user
             │   │ └─4309 "(sd-pam)"
             │   └─user.slice
             │     └─podman-pause-9f2ee473.scope
             │       └─12394 catatonit -P
             └─user-42477.slice
               ├─session-20.scope
               │ ├─76729 "sshd-session: ceph-admin [priv]"
               │ └─76752 "sshd-session: ceph-admin"
               ├─session-22.scope
               │ ├─76747 "sshd-session: ceph-admin [priv]"
               │ └─76753 "sshd-session: ceph-admin@notty"
               ├─session-23.scope
               │ ├─76804 "sshd-session: ceph-admin [priv]"
               │ └─76807 "sshd-session: ceph-admin@notty"
               ├─session-24.scope
               │ ├─76858 "sshd-session: ceph-admin [priv]"
               │ └─76861 "sshd-session: ceph-admin@notty"
               ├─session-25.scope
               │ ├─76912 "sshd-session: ceph-admin [priv]"
               │ └─76915 "sshd-session: ceph-admin@notty"
               ├─session-26.scope
               │ ├─76966 "sshd-session: ceph-admin [priv]"
               │ └─76969 "sshd-session: ceph-admin@notty"
               ├─session-27.scope
               │ ├─77020 "sshd-session: ceph-admin [priv]"
               │ └─77023 "sshd-session: ceph-admin@notty"
               ├─session-28.scope
               │ ├─77074 "sshd-session: ceph-admin [priv]"
               │ └─77077 "sshd-session: ceph-admin@notty"
               ├─session-29.scope
               │ ├─77128 "sshd-session: ceph-admin [priv]"
               │ └─77131 "sshd-session: ceph-admin@notty"
               ├─session-30.scope
               │ ├─77182 "sshd-session: ceph-admin [priv]"
               │ └─77185 "sshd-session: ceph-admin@notty"
               ├─session-31.scope
               │ ├─77209 "sshd-session: ceph-admin [priv]"
               │ └─77212 "sshd-session: ceph-admin@notty"
               ├─session-32.scope
               │ ├─77263 "sshd-session: ceph-admin [priv]"
               │ └─77266 "sshd-session: ceph-admin@notty"
               └─user@42477.service
                 └─init.scope
                   ├─76733 /usr/lib/systemd/systemd --user
                   └─76736 "(sd-pam)"

● dbus.socket - D-Bus System Message Bus Socket
     Loaded: loaded (/usr/lib/systemd/system/dbus.socket; enabled; preset: enabled)
     Active: active (running) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
   Triggers: ● dbus-broker.service
     Listen: /run/dbus/system_bus_socket (Stream)
     CGroup: /system.slice/dbus.socket

Nov 25 09:43:56 localhost systemd[1]: Listening on D-Bus System Message Bus Socket.

● dm-event.socket - Device-mapper event daemon FIFOs
     Loaded: loaded (/usr/lib/systemd/system/dm-event.socket; enabled; preset: enabled)
     Active: active (listening) since Tue 2025-11-25 10:11:52 UTC; 11min ago
      Until: Tue 2025-11-25 10:11:52 UTC; 11min ago
   Triggers: ● dm-event.service
       Docs: man:dmeventd(8)
     Listen: /run/dmeventd-server (FIFO)
             /run/dmeventd-client (FIFO)
     CGroup: /system.slice/dm-event.socket

Nov 25 10:11:52 compute-0 systemd[1]: Listening on Device-mapper event daemon FIFOs.

● lvm2-lvmpolld.socket - LVM2 poll daemon socket
     Loaded: loaded (/usr/lib/systemd/system/lvm2-lvmpolld.socket; enabled; preset: enabled)
     Active: active (listening) since Tue 2025-11-25 10:11:53 UTC; 11min ago
      Until: Tue 2025-11-25 10:11:53 UTC; 11min ago
   Triggers: ● lvm2-lvmpolld.service
       Docs: man:lvmpolld(8)
     Listen: /run/lvm/lvmpolld.socket (Stream)
     CGroup: /system.slice/lvm2-lvmpolld.socket

Nov 25 10:11:53 compute-0 systemd[1]: Listening on LVM2 poll daemon socket.

● rpcbind.socket - RPCbind Server Activation Socket
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.socket; enabled; preset: enabled)
     Active: active (running) since Tue 2025-11-25 09:43:54 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:54 UTC; 39min ago
   Triggers: ● rpcbind.service
     Listen: /run/rpcbind.sock (Stream)
             0.0.0.0:111 (Stream)
             0.0.0.0:111 (Datagram)
             [::]:111 (Stream)
             [::]:111 (Datagram)
         IO: 0B read, 0B written
      Tasks: 0 (limit: 48640)
     Memory: 16.0K (peak: 288.0K)
        CPU: 5ms
     CGroup: /system.slice/rpcbind.socket

● sssd-kcm.socket - SSSD Kerberos Cache Manager responder socket
     Loaded: loaded (/usr/lib/systemd/system/sssd-kcm.socket; enabled; preset: enabled)
     Active: active (listening) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
   Triggers: ● sssd-kcm.service
       Docs: man:sssd-kcm(8)
     Listen: /run/.heim_org.h5l.kcm-socket (Stream)
     CGroup: /system.slice/sssd-kcm.socket

Nov 25 09:43:56 localhost systemd[1]: Listening on SSSD Kerberos Cache Manager responder socket.

○ syslog.socket - Syslog Socket
     Loaded: loaded (/usr/lib/systemd/system/syslog.socket; static)
     Active: inactive (dead)
   Triggers: ● syslog.service
       Docs: man:systemd.special(7)
             https://www.freedesktop.org/wiki/Software/systemd/syslog
     Listen: /run/systemd/journal/syslog (Datagram)

● systemd-coredump.socket - Process Core Dump Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-coredump.socket; static)
     Active: active (listening) since Tue 2025-11-25 09:43:55 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:systemd-coredump(8)
     Listen: /run/systemd/coredump (SequentialPacket)
   Accepted: 0; Connected: 0;
     CGroup: /system.slice/systemd-coredump.socket

● systemd-initctl.socket - initctl Compatibility Named Pipe
     Loaded: loaded (/usr/lib/systemd/system/systemd-initctl.socket; static)
     Active: active (listening) since Tue 2025-11-25 09:43:55 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:55 UTC; 39min ago
   Triggers: ● systemd-initctl.service
       Docs: man:systemd-initctl.socket(8)
     Listen: /run/initctl (FIFO)
     CGroup: /system.slice/systemd-initctl.socket

● systemd-journald-dev-log.socket - Journal Socket (/dev/log)
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald-dev-log.socket; static)
     Active: active (running) since Tue 2025-11-25 09:43:51 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:51 UTC; 39min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/dev-log (Datagram)
     CGroup: /system.slice/systemd-journald-dev-log.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-journald.socket - Journal Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-journald.socket; static)
     Active: active (running) since Tue 2025-11-25 09:43:51 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:51 UTC; 39min ago
   Triggers: ● systemd-journald.service
       Docs: man:systemd-journald.service(8)
             man:journald.conf(5)
     Listen: /run/systemd/journal/socket (Datagram)
             /run/systemd/journal/stdout (Stream)
     CGroup: /system.slice/systemd-journald.socket

Notice: journal has been rotated since unit was started, output may be incomplete.

● systemd-rfkill.socket - Load/Save RF Kill Switch Status /dev/rfkill Watch
     Loaded: loaded (/usr/lib/systemd/system/systemd-rfkill.socket; static)
     Active: active (listening) since Tue 2025-11-25 09:43:59 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:59 UTC; 39min ago
   Triggers: ● systemd-rfkill.service
       Docs: man:systemd-rfkill.socket(8)
     Listen: /dev/rfkill (Special)
     CGroup: /system.slice/systemd-rfkill.socket

Nov 25 09:43:59 np0005534776.novalocal systemd[1]: Listening on Load/Save RF Kill Switch Status /dev/rfkill Watch.

● systemd-udevd-control.socket - udev Control Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-control.socket; static)
     Active: active (running) since Tue 2025-11-25 09:43:55 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:55 UTC; 39min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-control.socket(8)
             man:udev(7)
     Listen: /run/udev/control (SequentialPacket)
     CGroup: /system.slice/systemd-udevd-control.socket

● systemd-udevd-kernel.socket - udev Kernel Socket
     Loaded: loaded (/usr/lib/systemd/system/systemd-udevd-kernel.socket; static)
     Active: active (running) since Tue 2025-11-25 09:43:55 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:55 UTC; 39min ago
   Triggers: ● systemd-udevd.service
       Docs: man:systemd-udevd-kernel.socket(8)
             man:udev(7)
     Listen: kobject-uevent 1 (Netlink)
     CGroup: /system.slice/systemd-udevd-kernel.socket

● swap.swap - /swap
     Loaded: loaded (/etc/fstab; generated)
     Active: active since Tue 2025-11-25 10:13:46 UTC; 9min ago
      Until: Tue 2025-11-25 10:13:46 UTC; 9min ago
       What: /swap
       Docs: man:fstab(5)
             man:systemd-fstab-generator(8)

● basic.target - Basic System
     Loaded: loaded (/usr/lib/systemd/system/basic.target; static)
     Active: active since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
       Docs: man:systemd.special(7)

Nov 25 09:43:56 localhost systemd[1]: Reached target Basic System.

○ blockdev@dev-disk-by\x2duuid-47e3724e\x2d7a1b\x2d439a\x2d9543\x2db98c9a290709.target - Block Device Preparation for /dev/disk/by-uuid/47e3724e-7a1b-439a-9543-b98c9a290709
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ blockdev@dev-vda1.target - Block Device Preparation for /dev/vda1
     Loaded: loaded (/usr/lib/systemd/system/blockdev@.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c.target - Ceph cluster 86651951-d7ad-54ed-8d1a-1eae0c6b599c
     Loaded: loaded (/etc/systemd/system/ceph-86651951-d7ad-54ed-8d1a-1eae0c6b599c.target; enabled; preset: disabled)
     Active: active since Tue 2025-11-25 10:18:35 UTC; 4min 56s ago
      Until: Tue 2025-11-25 10:18:35 UTC; 4min 56s ago

Nov 25 10:18:35 compute-0 systemd[1]: Reached target Ceph cluster 86651951-d7ad-54ed-8d1a-1eae0c6b599c.

● ceph.target - All Ceph clusters and services
     Loaded: loaded (/etc/systemd/system/ceph.target; enabled; preset: disabled)
     Active: active since Tue 2025-11-25 10:18:35 UTC; 4min 57s ago
      Until: Tue 2025-11-25 10:18:35 UTC; 4min 57s ago

Nov 25 10:18:35 compute-0 systemd[1]: Reached target All Ceph clusters and services.

● cloud-config.target - Cloud-config availability
     Loaded: loaded (/usr/lib/systemd/system/cloud-config.target; static)
     Active: active since Tue 2025-11-25 09:44:04 UTC; 39min ago
      Until: Tue 2025-11-25 09:44:04 UTC; 39min ago

Nov 25 09:44:04 np0005534776.novalocal systemd[1]: Reached target Cloud-config availability.

● cloud-init.target - Cloud-init target
     Loaded: loaded (/usr/lib/systemd/system/cloud-init.target; enabled-runtime; preset: disabled)
     Active: active since Tue 2025-11-25 09:44:05 UTC; 39min ago
      Until: Tue 2025-11-25 09:44:05 UTC; 39min ago

Nov 25 09:44:05 np0005534776.novalocal systemd[1]: Reached target Cloud-init target.

○ cryptsetup-pre.target - Local Encrypted Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● cryptsetup.target - Local Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/cryptsetup.target; static)
     Active: active since Tue 2025-11-25 09:43:54 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:54 UTC; 39min ago
       Docs: man:systemd.special(7)

○ emergency.target - Emergency Mode
     Loaded: loaded (/usr/lib/systemd/system/emergency.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ first-boot-complete.target - First Boot Complete
     Loaded: loaded (/usr/lib/systemd/system/first-boot-complete.target; static)
     Active: inactive (dead)
  Condition: start condition failed at Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:systemd.special(7)

Nov 25 09:43:55 localhost systemd[1]: First Boot Complete was skipped because of an unmet condition check (ConditionFirstBoot=yes).

○ getty-pre.target - Preparation for Logins
     Loaded: loaded (/usr/lib/systemd/system/getty-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

● getty.target - Login Prompts
     Loaded: loaded (/usr/lib/systemd/system/getty.target; static)
     Active: active since Tue 2025-11-25 09:44:04 UTC; 39min ago
      Until: Tue 2025-11-25 09:44:04 UTC; 39min ago
       Docs: man:systemd.special(7)
             man:systemd-getty-generator(8)
             http://0pointer.de/blog/projects/serial-console.html

Nov 25 09:44:04 np0005534776.novalocal systemd[1]: Reached target Login Prompts.

○ graphical.target - Graphical Interface
     Loaded: loaded (/usr/lib/systemd/system/graphical.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ initrd-fs.target - Initrd File Systems
     Loaded: loaded (/usr/lib/systemd/system/initrd-fs.target; static)
     Active: inactive (dead) since Tue 2025-11-25 09:43:54 UTC; 39min ago
       Docs: man:systemd.special(7)

Nov 25 09:43:53 localhost systemd[1]: Reached target Initrd File Systems.

○ initrd-root-device.target - Initrd Root Device
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-device.target; static)
     Active: inactive (dead) since Tue 2025-11-25 09:43:53 UTC; 39min ago
       Docs: man:systemd.special(7)

Nov 25 09:43:51 localhost systemd[1]: Reached target Initrd Root Device.
Nov 25 09:43:53 localhost systemd[1]: Stopped target Initrd Root Device.

○ initrd-root-fs.target - Initrd Root File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-root-fs.target; static)
     Active: inactive (dead) since Tue 2025-11-25 09:43:54 UTC; 39min ago
       Docs: man:systemd.special(7)

Nov 25 09:43:53 localhost systemd[1]: Reached target Initrd Root File System.

○ initrd-switch-root.target - Switch Root
     Loaded: loaded (/usr/lib/systemd/system/initrd-switch-root.target; static)
     Active: inactive (dead) since Tue 2025-11-25 09:43:54 UTC; 39min ago

Nov 25 09:43:53 localhost systemd[1]: Reached target Switch Root.

○ initrd-usr-fs.target - Initrd /usr File System
     Loaded: loaded (/usr/lib/systemd/system/initrd-usr-fs.target; static)
     Active: inactive (dead) since Tue 2025-11-25 09:43:53 UTC; 39min ago
       Docs: man:systemd.special(7)

Nov 25 09:43:53 localhost systemd[1]: Stopped target Initrd /usr File System.
Notice: journal has been rotated since unit was started, output may be incomplete.

○ initrd.target - Initrd Default Target
     Loaded: loaded (/usr/lib/systemd/system/initrd.target; static)
     Active: inactive (dead) since Tue 2025-11-25 09:43:53 UTC; 39min ago
       Docs: man:systemd.special(7)

Nov 25 09:43:53 localhost systemd[1]: Reached target Initrd Default Target.
Nov 25 09:43:53 localhost systemd[1]: Stopped target Initrd Default Target.

● integritysetup.target - Local Integrity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/integritysetup.target; static)
     Active: active since Tue 2025-11-25 09:43:54 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:54 UTC; 39min ago
       Docs: man:systemd.special(7)

● local-fs-pre.target - Preparation for Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs-pre.target; static)
     Active: active since Tue 2025-11-25 09:43:55 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:systemd.special(7)

Nov 25 09:43:55 localhost systemd[1]: Reached target Preparation for Local File Systems.

● local-fs.target - Local File Systems
     Loaded: loaded (/usr/lib/systemd/system/local-fs.target; static)
     Active: active since Tue 2025-11-25 09:43:55 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:systemd.special(7)

Nov 25 09:43:55 localhost systemd[1]: Reached target Local File Systems.

● multi-user.target - Multi-User System
     Loaded: loaded (/usr/lib/systemd/system/multi-user.target; indirect; preset: disabled)
     Active: active since Tue 2025-11-25 09:44:04 UTC; 39min ago
      Until: Tue 2025-11-25 09:44:04 UTC; 39min ago
       Docs: man:systemd.special(7)

Nov 25 09:44:04 np0005534776.novalocal systemd[1]: Reached target Multi-User System.

● network-online.target - Network is Online
     Loaded: loaded (/usr/lib/systemd/system/network-online.target; static)
     Active: active since Tue 2025-11-25 09:44:04 UTC; 39min ago
      Until: Tue 2025-11-25 09:44:04 UTC; 39min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Nov 25 09:44:04 np0005534776.novalocal systemd[1]: Reached target Network is Online.

● network-pre.target - Preparation for Network
     Loaded: loaded (/usr/lib/systemd/system/network-pre.target; static)
     Active: active since Tue 2025-11-25 09:43:59 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:59 UTC; 39min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Nov 25 09:43:59 np0005534776.novalocal systemd[1]: Reached target Preparation for Network.

● network.target - Network
     Loaded: loaded (/usr/lib/systemd/system/network.target; static)
     Active: active since Tue 2025-11-25 09:43:59 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:59 UTC; 39min ago
       Docs: man:systemd.special(7)
             https://systemd.io/NETWORK_ONLINE

Nov 25 09:43:59 np0005534776.novalocal systemd[1]: Reached target Network.

● nfs-client.target - NFS client services
     Loaded: loaded (/usr/lib/systemd/system/nfs-client.target; enabled; preset: disabled)
     Active: active since Tue 2025-11-25 09:43:59 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:59 UTC; 39min ago

Nov 25 09:43:59 np0005534776.novalocal systemd[1]: Reached target NFS client services.

○ nss-lookup.target - Host and Network Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-lookup.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● nss-user-lookup.target - User and Group Name Lookups
     Loaded: loaded (/usr/lib/systemd/system/nss-user-lookup.target; static)
     Active: active since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
       Docs: man:systemd.special(7)

Nov 25 09:43:56 localhost systemd[1]: Reached target User and Group Name Lookups.

● paths.target - Path Units
     Loaded: loaded (/usr/lib/systemd/system/paths.target; static)
     Active: active since Tue 2025-11-25 09:43:54 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:54 UTC; 39min ago
       Docs: man:systemd.special(7)

○ remote-cryptsetup.target - Remote Encrypted Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-cryptsetup.target; disabled; preset: enabled)
     Active: inactive (dead) since Tue 2025-11-25 09:43:53 UTC; 39min ago
       Docs: man:systemd.special(7)

Nov 25 09:43:52 localhost systemd[1]: Reached target Remote Encrypted Volumes.
Nov 25 09:43:53 localhost systemd[1]: Stopped target Remote Encrypted Volumes.

● remote-fs-pre.target - Preparation for Remote File Systems
Unit syslog.target could not be found.
     Loaded: loaded (/usr/lib/systemd/system/remote-fs-pre.target; static)
     Active: active since Tue 2025-11-25 09:43:59 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:59 UTC; 39min ago
       Docs: man:systemd.special(7)

Nov 25 09:43:59 np0005534776.novalocal systemd[1]: Reached target Preparation for Remote File Systems.

● remote-fs.target - Remote File Systems
     Loaded: loaded (/usr/lib/systemd/system/remote-fs.target; enabled; preset: enabled)
     Active: active since Tue 2025-11-25 09:43:59 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:59 UTC; 39min ago
       Docs: man:systemd.special(7)

Nov 25 09:43:59 np0005534776.novalocal systemd[1]: Reached target Remote File Systems.

○ remote-veritysetup.target - Remote Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/remote-veritysetup.target; disabled; preset: disabled)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ rescue.target - Rescue Mode
     Loaded: loaded (/usr/lib/systemd/system/rescue.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● rpc_pipefs.target
     Loaded: loaded (/usr/lib/systemd/system/rpc_pipefs.target; static)
     Active: active since Tue 2025-11-25 09:43:54 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:54 UTC; 39min ago

● rpcbind.target - RPC Port Mapper
     Loaded: loaded (/usr/lib/systemd/system/rpcbind.target; static)
     Active: active since Tue 2025-11-25 09:43:55 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:55 UTC; 39min ago
       Docs: man:systemd.special(7)

○ shutdown.target - System Shutdown
     Loaded: loaded (/usr/lib/systemd/system/shutdown.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● slices.target - Slice Units
     Loaded: loaded (/usr/lib/systemd/system/slices.target; static)
     Active: active since Tue 2025-11-25 09:43:54 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:54 UTC; 39min ago
       Docs: man:systemd.special(7)

● sockets.target - Socket Units
     Loaded: loaded (/usr/lib/systemd/system/sockets.target; static)
     Active: active since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
       Docs: man:systemd.special(7)

Nov 25 09:43:56 localhost systemd[1]: Reached target Socket Units.

● sshd-keygen.target
     Loaded: loaded (/usr/lib/systemd/system/sshd-keygen.target; static)
     Active: active since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago

Nov 25 09:43:56 localhost systemd[1]: Reached target sshd-keygen.target.

● swap.target - Swaps
     Loaded: loaded (/usr/lib/systemd/system/swap.target; static)
     Active: active since Tue 2025-11-25 09:43:54 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:54 UTC; 39min ago
       Docs: man:systemd.special(7)

● sysinit.target - System Initialization
     Loaded: loaded (/usr/lib/systemd/system/sysinit.target; static)
     Active: active since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
       Docs: man:systemd.special(7)

Nov 25 09:43:56 localhost systemd[1]: Reached target System Initialization.

● time-set.target - System Time Set
     Loaded: loaded (/usr/lib/systemd/system/time-set.target; static)
     Active: active since Tue 2025-11-25 10:18:36 UTC; 4min 56s ago
      Until: Tue 2025-11-25 10:18:36 UTC; 4min 56s ago
       Docs: man:systemd.special(7)

Nov 25 10:18:36 compute-0 systemd[1]: Reached target System Time Set.

● time-sync.target - System Time Synchronized
     Loaded: loaded (/usr/lib/systemd/system/time-sync.target; static)
     Active: active since Tue 2025-11-25 10:18:36 UTC; 4min 56s ago
      Until: Tue 2025-11-25 10:18:36 UTC; 4min 56s ago
       Docs: man:systemd.special(7)

Nov 25 10:18:36 compute-0 systemd[1]: Reached target System Time Synchronized.

● timers.target - Timer Units
     Loaded: loaded (/usr/lib/systemd/system/timers.target; static)
     Active: active since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
       Docs: man:systemd.special(7)

Nov 25 09:43:56 localhost systemd[1]: Reached target Timer Units.

○ umount.target - Unmount All Filesystems
     Loaded: loaded (/usr/lib/systemd/system/umount.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

○ veritysetup-pre.target - Local Verity Protected Volumes (Pre)
     Loaded: loaded (/usr/lib/systemd/system/veritysetup-pre.target; static)
     Active: inactive (dead)
       Docs: man:systemd.special(7)

● veritysetup.target - Local Verity Protected Volumes
     Loaded: loaded (/usr/lib/systemd/system/veritysetup.target; static)
     Active: active since Tue 2025-11-25 09:43:54 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:54 UTC; 39min ago
       Docs: man:systemd.special(7)

● dnf-makecache.timer - dnf makecache --timer
     Loaded: loaded (/usr/lib/systemd/system/dnf-makecache.timer; enabled; preset: enabled)
     Active: active (waiting) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
    Trigger: Tue 2025-11-25 11:20:07 UTC; 56min left
   Triggers: ● dnf-makecache.service

Nov 25 09:43:56 localhost systemd[1]: Started dnf makecache --timer.

● logrotate.timer - Daily rotation of log files
     Loaded: loaded (/usr/lib/systemd/system/logrotate.timer; enabled; preset: enabled)
     Active: active (waiting) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
    Trigger: Wed 2025-11-26 00:00:00 UTC; 13h left
   Triggers: ● logrotate.service
       Docs: man:logrotate(8)
             man:logrotate.conf(5)

Nov 25 09:43:56 localhost systemd[1]: Started Daily rotation of log files.

○ sysstat-collect.timer - Run system activity accounting tool every 10 minutes
     Loaded: loaded (/usr/lib/systemd/system/sysstat-collect.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-collect.service

○ sysstat-summary.timer - Generate summary of yesterday's process accounting
     Loaded: loaded (/usr/lib/systemd/system/sysstat-summary.timer; enabled; preset: disabled)
     Active: inactive (dead)
    Trigger: n/a
   Triggers: ● sysstat-summary.service

● systemd-tmpfiles-clean.timer - Daily Cleanup of Temporary Directories
     Loaded: loaded (/usr/lib/systemd/system/systemd-tmpfiles-clean.timer; static)
     Active: active (waiting) since Tue 2025-11-25 09:43:56 UTC; 39min ago
      Until: Tue 2025-11-25 09:43:56 UTC; 39min ago
    Trigger: Wed 2025-11-26 09:59:04 UTC; 23h left
   Triggers: ● systemd-tmpfiles-clean.service
       Docs: man:tmpfiles.d(5)
             man:systemd-tmpfiles(8)

Nov 25 09:43:56 localhost systemd[1]: Started Daily Cleanup of Temporary Directories.

● unbound-anchor.timer - daily update of the root trust anchor for DNSSEC
     Loaded: loaded (/usr/lib/systemd/system/unbound-anchor.timer; enabled; preset: enabled)
     Active: active (waiting) since Tue 2025-11-25 10:14:20 UTC; 9min ago
      Until: Tue 2025-11-25 10:14:20 UTC; 9min ago
    Trigger: Wed 2025-11-26 00:00:00 UTC; 13h left
   Triggers: ● unbound-anchor.service
       Docs: man:unbound-anchor(8)

Nov 25 10:14:20 compute-0 systemd[1]: Started daily update of the root trust anchor for DNSSEC.
